from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
def matching_min_vertex_cover(graph: dict) -> set:
    """
    Approximation (APX) algorithm for minimum vertex cover based on matching:
    repeatedly pick an uncovered edge and take both of its endpoints.
    """
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove all edges adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """
    Return a set of (from_node, to_node) couples that represents all of the graph's edges.
    """
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
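
# Added illustration (not part of the original file): run the heuristic on the
# example graph above. The exact cover depends on the set's pop order, but it is
# always a valid cover, since both endpoints of every popped edge are taken.
def _demo_matching_min_vertex_cover() -> None:
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    cover = matching_min_vertex_cover(graph)
    # every edge must have at least one endpoint in the cover
    assert all(u in cover or v in cover for (u, v) in get_edges(graph))
    print(f"Matching vertex cover: {cover}")


if __name__ == "__main__":
    _demo_matching_min_vertex_cover()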
def sum_of_digits(n: int) -> int:
    """
    Find the sum of digits of a number.

    >>> sum_of_digits(12345)
    15
    >>> sum_of_digits(-123)
    6
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """
    Find the sum of digits of a number, recursive formulation.

    >>> sum_of_digits_recursion(12345)
    15
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """
    Find the sum of digits of a number via string conversion.

    >>> sum_of_digits_compact(12345)
    15
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """
    Benchmark the three implementations on ints of increasing length
    (2**18, 2**50 and 2**100).
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
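
# Added sanity check (not part of the original file): the three implementations
# should agree on any input, e.g. 2 + 6 + 2 + 1 + 4 + 4 = 19 for 262144.
if __name__ == "__main__":
    for sample in (0, 7, 262144, -9045):
        assert sum_of_digits(sample) == sum_of_digits_recursion(sample) == sum_of_digits_compact(sample)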
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time.

    >>> is_prime(2)
    True
    >>> is_prime(9)
    False
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """
    Returns a list of the first n odd composite numbers that cannot be written
    as the sum of a prime and twice a square.
    """
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest odd composite that is not a prime plus twice a square."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero-shot audio classification pipeline: predicts the class of an audio clip
    from a set of `candidate_labels` provided at call time.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
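
# Example usage (an illustrative sketch, not part of the original file; the
# checkpoint name and the wav file path below are assumptions):
#
#     from transformers import pipeline
#
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("sound_of_a_dog.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#     # -> a list of {"score": ..., "label": ...} dicts, highest score first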
import argparse
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    # load GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
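
# Example invocation (illustrative; the script and checkpoint file names are
# assumptions based on the argument parser above):
#
#     python convert_glpn_to_pytorch.py \
#         --checkpoint_path ./glpn_kitti.pth \
#         --pytorch_dump_folder_path ./glpn-kitti \
#         --model_name glpn-kitti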
from __future__ import annotations


class Node:
    """
    A Node has a data variable and pointers to Nodes to its left and right.
    """

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    """A full binary tree is one in which every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
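
# Added illustration (not part of the original file): a node with exactly one
# child violates the "full" property, while a lone leaf satisfies it.
if __name__ == "__main__":
    lone = Node(10)
    lone.left = Node(11)  # only one child
    assert not is_full_binary_tree(lone)
    assert is_full_binary_tree(Node(12))  # a single leaf is trivially full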
import tempfile
import unittest

from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model


class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())


@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = """    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
            " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
            " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
            " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
            " Luong, Quoc V. Le, Christopher D. Manning."
        )
        localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
            " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
            " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
            " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
            " Christopher D. Manning 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
        )
        link_unchanged_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
            " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
import os
import sys
import tempfile

import torch

from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment


def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """
    Launch a training function inside a notebook, using several processes if needed.
    """
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """
    Launch a training function on several processes on CPU, for debugging purposes.
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
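
# Example usage (a sketch, not part of the original file; `training_function`
# is a hypothetical user-defined function, as in the accelerate notebook docs):
#
#     def training_function():
#         ...  # build the Accelerator, model and dataloaders *inside* this function
#
#     notebook_launcher(training_function, args=(), num_processes=2)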
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_UpperCAmelCase : Optional[int] = """<<<<<<< This should probably be modified because it mentions: """
_UpperCAmelCase : List[Any] = """=======
>>>>>>>
"""
_UpperCAmelCase : List[str] = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
_UpperCAmelCase : str = [
# (pattern, replacement)
# Order is important here for some replacements
(R"""tfds\.core""", R"""datasets"""),
(R"""tf\.io\.gfile\.GFile""", R"""open"""),
(R"""tf\.([\w\d]+)""", R"""datasets.Value('\1')"""),
(R"""tfds\.features\.Text\(\)""", R"""datasets.Value('string')"""),
(R"""tfds\.features\.Text\(""", R"""datasets.Value('string'),"""),
(R"""features\s*=\s*tfds.features.FeaturesDict\(""", R"""features=datasets.Features("""),
(R"""tfds\.features\.FeaturesDict\(""", R"""dict("""),
(R"""The TensorFlow Datasets Authors""", R"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(R"""tfds\.""", R"""datasets."""),
(R"""dl_manager\.manual_dir""", R"""self.config.data_dir"""),
(R"""self\.builder_config""", R"""self.config"""),
]
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Any:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowerCAmelCase ( __UpperCamelCase ):
@staticmethod
def A_ ( UpperCAmelCase : ArgumentParser ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=UpperCAmelCase , required=UpperCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=UpperCAmelCase , required=UpperCAmelCase , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=UpperCAmelCase )
def __init__( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : str , *UpperCAmelCase : int ) -> Optional[Any]:
lowerCamelCase__ : Any = get_logger('datasets-cli/converting' )
lowerCamelCase__ : Optional[Any] = tfds_path
lowerCamelCase__ : Any = datasets_directory
def A_ ( self : Dict ) -> str:
if os.path.isdir(self._tfds_path ):
lowerCamelCase__ : Optional[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowerCamelCase__ : List[Any] = os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
lowerCamelCase__ : List[Any] = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
lowerCamelCase__ : Any = []
lowerCamelCase__ : int = []
lowerCamelCase__ : List[str] = {}
if os.path.isdir(self._tfds_path ):
lowerCamelCase__ : Dict = os.listdir(UpperCAmelCase )
else:
lowerCamelCase__ : List[Any] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
lowerCamelCase__ : int = os.path.join(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : int = os.path.join(UpperCAmelCase , UpperCAmelCase )
if not os.path.isfile(UpperCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(UpperCAmelCase , encoding='utf-8' ) as f:
lowerCamelCase__ : Dict = f.readlines()
lowerCamelCase__ : List[Any] = []
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Tuple = []
for line in lines:
lowerCamelCase__ : Union[str, Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowerCamelCase__ : Tuple = 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
lowerCamelCase__ : List[Any] = ''
continue
elif "from absl import logging" in out_line:
lowerCamelCase__ : List[Any] = 'from datasets import logging\n'
elif "getLogger" in out_line:
lowerCamelCase__ : Tuple = out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowerCamelCase__ : str = True
lowerCamelCase__ : str = list(filter(lambda e : e in out_line , UpperCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(UpperCAmelCase ) + '\n' )
out_lines.append(UpperCAmelCase )
out_lines.append(HIGHLIGHT_MESSAGE_POST )
continue
else:
for pattern, replacement in TO_CONVERT:
lowerCamelCase__ : Union[str, Any] = re.sub(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowerCamelCase__ : Optional[int] = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , UpperCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
lowerCamelCase__ : Dict = 'from . import ' + match.group(1 )
# Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowerCamelCase__ : List[Any] = True
out_lines.append(UpperCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowerCamelCase__ : List[Any] = f_name.replace('.py' , '' )
lowerCamelCase__ : int = os.path.join(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = os.path.join(UpperCAmelCase , UpperCAmelCase )
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(UpperCAmelCase )
if needs_manual_update:
with_manual_update.append(UpperCAmelCase )
with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.writelines(UpperCAmelCase )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
lowerCamelCase__ : List[Any] = os.path.basename(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = imports_to_builder_map[f_name.replace('.py' , '' )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(UpperCAmelCase , UpperCAmelCase )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = 42
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase ):
@register_to_config
def __init__( self : List[str] , UpperCAmelCase : int = 65536 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 0 , UpperCAmelCase : str = "fourier" , UpperCAmelCase : bool = True , UpperCAmelCase : bool = False , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase : Tuple[str] = "UNetMidBlock1D" , UpperCAmelCase : str = None , UpperCAmelCase : Tuple[int] = (32, 32, 64) , UpperCAmelCase : str = None , UpperCAmelCase : int = 8 , UpperCAmelCase : int = 1 , UpperCAmelCase : bool = False , ) -> List[Any]:
super().__init__()
lowerCamelCase__ : Optional[int] = sample_size
# time
if time_embedding_type == "fourier":
lowerCamelCase__ : Optional[Any] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=UpperCAmelCase , log=UpperCAmelCase , flip_sin_to_cos=UpperCAmelCase )
lowerCamelCase__ : Any = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowerCamelCase__ : List[Any] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase , downscale_freq_shift=UpperCAmelCase )
lowerCamelCase__ : Dict = block_out_channels[0]
if use_timestep_embedding:
lowerCamelCase__ : str = block_out_channels[0] * 4
lowerCamelCase__ : List[Any] = TimestepEmbedding(
in_channels=UpperCAmelCase , time_embed_dim=UpperCAmelCase , act_fn=UpperCAmelCase , out_dim=block_out_channels[0] , )
lowerCamelCase__ : Any = nn.ModuleList([] )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : List[str] = nn.ModuleList([] )
lowerCamelCase__ : Optional[int] = None
# down
lowerCamelCase__ : Optional[int] = in_channels
for i, down_block_type in enumerate(UpperCAmelCase ):
lowerCamelCase__ : Union[str, Any] = output_channel
lowerCamelCase__ : Tuple = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowerCamelCase__ : Union[str, Any] = i == len(UpperCAmelCase ) - 1
lowerCamelCase__ : Optional[int] = get_down_block(
UpperCAmelCase , num_layers=UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(UpperCAmelCase )
# mid
lowerCamelCase__ : Optional[int] = get_mid_block(
UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase , add_downsample=UpperCAmelCase , )
# up
lowerCamelCase__ : Optional[int] = list(reversed(UpperCAmelCase ) )
lowerCamelCase__ : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
lowerCamelCase__ : List[str] = out_channels
else:
lowerCamelCase__ : Any = block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase ):
lowerCamelCase__ : Tuple = output_channel
lowerCamelCase__ : Union[str, Any] = (
reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase ) - 1 else final_upsample_channels
)
lowerCamelCase__ : List[str] = i == len(UpperCAmelCase ) - 1
lowerCamelCase__ : Dict = get_up_block(
UpperCAmelCase , num_layers=UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(UpperCAmelCase )
lowerCamelCase__ : int = output_channel
# out
lowerCamelCase__ : int = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowerCamelCase__ : List[Any] = get_out_block(
out_block_type=UpperCAmelCase , num_groups_out=UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase , act_fn=UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def A_ ( self : List[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Union[torch.Tensor, float, int] , UpperCAmelCase : bool = True , ) -> Union[UNetaDOutput, Tuple]:
lowerCamelCase__ : Optional[Any] = timestep
if not torch.is_tensor(UpperCAmelCase ):
lowerCamelCase__ : Optional[int] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(UpperCAmelCase ) and len(timesteps.shape ) == 0:
lowerCamelCase__ : List[str] = timesteps[None].to(sample.device )
lowerCamelCase__ : Optional[int] = self.time_proj(UpperCAmelCase )
if self.config.use_timestep_embedding:
lowerCamelCase__ : str = self.time_mlp(UpperCAmelCase )
else:
lowerCamelCase__ : List[str] = timestep_embed[..., None]
lowerCamelCase__ : str = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowerCamelCase__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowerCamelCase__ : str = ()
for downsample_block in self.down_blocks:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = downsample_block(hidden_states=UpperCAmelCase , temb=UpperCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowerCamelCase__ : Optional[Any] = self.mid_block(UpperCAmelCase , UpperCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowerCamelCase__ : Dict = down_block_res_samples[-1:]
lowerCamelCase__ : Optional[Any] = down_block_res_samples[:-1]
lowerCamelCase__ : Any = upsample_block(UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , temb=UpperCAmelCase )
# 5. post-process
if self.out_block:
lowerCamelCase__ : Any = self.out_block(UpperCAmelCase , UpperCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=UpperCAmelCase )
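if __name__ == "__main__":
    # Shape smoke-test sketch: the class above mirrors `diffusers.UNet1DModel`,
    # so the published API is used here instead (an assumption); default
    # config, one two-channel sequence at the default sample size.
    from diffusers import UNet1DModel

    model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
    noise = torch.randn(1, 2, 65536)
    with torch.no_grad():
        out = model(noise, timestep=10).sample
    print(out.shape)  # expected: torch.Size([1, 2, 65536])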
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase = 100_0000 ) -> int:
lowerCamelCase__ : int = limit + 1
lowerCamelCase__ : Optional[Any] = [0] * limit
for first_term in range(1 , limit ):
for n in range(first_term , limit , first_term ):
lowerCamelCase__ : Optional[Any] = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z > 0 and a > d, also 4d < a
lowerCamelCase__ : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F"""{solution() = }""")
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> list[tuple[int, int]]:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = position
lowerCamelCase__ : Optional[Any] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowerCamelCase__ : Dict = []
for position in positions:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(_UpperCAmelCase )
return permissible_positions
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> bool:
return not any(elem == 0 for row in board for elem in row )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> bool:
if is_complete(_UpperCAmelCase ):
return True
for position in get_valid_pos(_UpperCAmelCase , len(_UpperCAmelCase ) ):
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = position
if board[y][x] == 0:
lowerCamelCase__ : List[Any] = curr + 1
if open_knight_tour_helper(_UpperCAmelCase , _UpperCAmelCase , curr + 1 ):
return True
lowerCamelCase__ : Optional[Any] = 0
return False
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> list[list[int]]:
lowerCamelCase__ : Any = [[0 for i in range(_UpperCAmelCase )] for j in range(_UpperCAmelCase )]
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
lowerCamelCase__ : Optional[int] = 1
if open_knight_tour_helper(_UpperCAmelCase , (i, j) , 1 ):
return board
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : Any = F"""Open Kight Tour cannot be performed on a board of size {n}"""
raise ValueError(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
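# Worked-example sketch: the last SCREAMING_SNAKE_CASE definition above is the
# entry point (originally named `open_knight_tour`). On a 5x5 board it returns
# a matrix whose entries 1..25 trace the knight's path, e.g. (illustrative only):
#
#   board = open_knight_tour(5)
#   assert sorted(v for row in board for v in row) == list(range(1, 26))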
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
def A_ ( self : int ) -> Any:
lowerCamelCase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=UpperCAmelCase ).to(UpperCAmelCase )
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('google/mt5-small' )
lowerCamelCase__ : Any = tokenizer('Hello there' , return_tensors='pt' ).input_ids
lowerCamelCase__ : Tuple = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
lowerCamelCase__ : Optional[int] = model(input_ids.to(UpperCAmelCase ) , labels=labels.to(UpperCAmelCase ) ).loss
lowerCamelCase__ : Dict = -(labels.shape[-1] * loss.item())
lowerCamelCase__ : Optional[int] = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int:
lowerCamelCase__ : Optional[int] = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
lowerCamelCase__ : Tuple = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def SCREAMING_SNAKE_CASE ( ) -> str:
lowerCamelCase__ : str = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
lowerCamelCase__ : Tuple = 'imagenet-1k-id2label.json'
lowerCamelCase__ : Union[str, Any] = 1000
lowerCamelCase__ : Optional[Any] = 'huggingface/label-files'
lowerCamelCase__ : Any = num_labels
lowerCamelCase__ : Dict = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) ) , 'r' ) )
lowerCamelCase__ : int = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : Tuple = idalabel
lowerCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
lowerCamelCase__ : List[str] = CvtConfig(num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
lowerCamelCase__ : List[Any] = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
lowerCamelCase__ : Dict = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowerCamelCase__ : Optional[Any] = [2, 2, 20]
lowerCamelCase__ : Optional[int] = [3, 12, 16]
lowerCamelCase__ : str = [192, 768, 1024]
lowerCamelCase__ : Any = CvtForImageClassification(_UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
lowerCamelCase__ : Tuple = image_size
lowerCamelCase__ : List[str] = torch.load(_UpperCAmelCase , map_location=torch.device('cpu' ) )
lowerCamelCase__ : Optional[int] = OrderedDict()
lowerCamelCase__ : Tuple = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowerCamelCase__ : Optional[Any] = list_of_state_dict + cls_token(_UpperCAmelCase )
lowerCamelCase__ : str = list_of_state_dict + embeddings(_UpperCAmelCase )
for cnt in range(config.depth[idx] ):
lowerCamelCase__ : str = list_of_state_dict + attention(_UpperCAmelCase , _UpperCAmelCase )
lowerCamelCase__ : int = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
lowerCamelCase__ : str = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_84,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase : List[str] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
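# Example command-line sketch (the script name and checkpoint location are
# assumptions; flags match the parser defined above):
#
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-13 \
#       --image_size 384 \
#       --cvt_file_name ./CvT-13-384x384-IN-1k.pth \
#       --pytorch_dump_folder_path ./cvt-13-converted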
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase : Dict = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
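# Illustration sketch: with the `_LazyModule` indirection above, a plain
# `import transformers` stays cheap; the torch-backed classes are only
# materialized on first attribute access, e.g.:
#
#   from transformers import MraConfig   # config only, no heavy backend import
#   from transformers import MraModel    # resolves the modeling module lazily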
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase=None ) -> Tuple:
if subparsers is not None:
lowerCamelCase__ : Any = subparsers.add_parser('test' )
else:
lowerCamelCase__ : int = argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' , default=_UpperCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=_UpperCAmelCase )
return parser
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Tuple = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
lowerCamelCase__ : List[str] = script_name
else:
lowerCamelCase__ : List[Any] = F"""--config_file={args.config_file} {script_name}"""
lowerCamelCase__ : str = ['accelerate-launch'] + test_args.split()
lowerCamelCase__ : Dict = execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def SCREAMING_SNAKE_CASE ( ) -> Any:
lowerCamelCase__ : Any = test_command_parser()
lowerCamelCase__ : List[Any] = parser.parse_args()
test_command(_UpperCAmelCase )
if __name__ == "__main__":
main()
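# Example invocation sketch: the parser above backs the `accelerate test`
# subcommand (the config path below is hypothetical):
#
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml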
from math import factorial
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase = 100 ) -> int:
return sum(map(int , str(factorial(_UpperCAmelCase ) ) ) )
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
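# Worked example: factorial(5) = 120 and 1 + 2 + 0 = 3, so
#
#   assert sum(map(int, str(factorial(5)))) == 3
#
# holds; with the default argument the function returns the digit sum of 100!.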
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase :
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : Dict=30 , UpperCAmelCase : str=2 , UpperCAmelCase : Tuple=3 , UpperCAmelCase : Dict=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : Optional[Any]=5 , UpperCAmelCase : List[Any]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Dict="gelu" , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : Union[str, Any]=10 , UpperCAmelCase : Union[str, Any]=0.0_2 , UpperCAmelCase : Optional[Any]=None , ) -> Union[str, Any]:
lowerCamelCase__ : Any = parent
lowerCamelCase__ : Optional[int] = batch_size
lowerCamelCase__ : int = image_size
lowerCamelCase__ : Tuple = patch_size
lowerCamelCase__ : str = num_channels
lowerCamelCase__ : Optional[int] = is_training
lowerCamelCase__ : List[str] = use_labels
lowerCamelCase__ : Dict = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : int = hidden_act
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : List[Any] = attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] = type_sequence_label_size
lowerCamelCase__ : int = initializer_range
lowerCamelCase__ : Optional[int] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : Tuple = (image_size // patch_size) ** 2
lowerCamelCase__ : Dict = num_patches + 1
def A_ ( self : Optional[Any] ) -> int:
lowerCamelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
if self.use_labels:
lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Dict = self.get_config()
return config, pixel_values, labels
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def A_ ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Dict ) -> Dict:
lowerCamelCase__ : Optional[Any] = ViTMSNModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Dict = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Any ) -> List[Any]:
lowerCamelCase__ : List[Any] = self.type_sequence_label_size
lowerCamelCase__ : Dict = ViTMSNForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Tuple = model(UpperCAmelCase , labels=UpperCAmelCase )
print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print('Labels: {labels}' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : Optional[int] = 1
lowerCamelCase__ : Tuple = ViTMSNForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Any = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self : List[str] ) -> List[Any]:
lowerCamelCase__ : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = config_and_inputs
lowerCamelCase__ : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def A_ ( self : Any ) -> Union[str, Any]:
lowerCamelCase__ : Any = ViTMSNModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def A_ ( self : int ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def A_ ( self : Optional[int] ) -> Any:
pass
def A_ ( self : Optional[int] ) -> str:
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : int = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A_ ( self : List[str] ) -> Optional[Any]:
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Tuple = model_class(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Dict = [*signature.parameters.keys()]
lowerCamelCase__ : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A_ ( self : Tuple ) -> Dict:
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A_ ( self : Union[str, Any] ) -> int:
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def A_ ( self : int ) -> Any:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : List[Any] = ViTMSNModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
lowerCamelCase__ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
@cached_property
def A_ ( self : Dict ) -> List[str]:
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def A_ ( self : Optional[int] ) -> List[str]:
torch.manual_seed(2 )
lowerCamelCase__ : Dict = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = self.default_image_processor
lowerCamelCase__ : Any = prepare_img()
lowerCamelCase__ : Tuple = image_processor(images=UpperCAmelCase , return_tensors='pt' ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCamelCase__ : str = model(**UpperCAmelCase )
# verify the logits
lowerCamelCase__ : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCamelCase__ : int = torch.tensor([-0.0_8_0_3, -0.4_4_5_4, -0.2_3_7_5] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : int = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : int = {
"""google/realm-cc-news-pretrained-embedder""": 5_12,
"""google/realm-cc-news-pretrained-encoder""": 5_12,
"""google/realm-cc-news-pretrained-scorer""": 5_12,
"""google/realm-cc-news-pretrained-openqa""": 5_12,
"""google/realm-orqa-nq-openqa""": 5_12,
"""google/realm-orqa-nq-reader""": 5_12,
"""google/realm-orqa-wq-openqa""": 5_12,
"""google/realm-orqa-wq-reader""": 5_12,
}
_UpperCAmelCase : Any = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = RealmTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Tuple=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]="[UNK]" , UpperCAmelCase : Any="[SEP]" , UpperCAmelCase : Tuple="[PAD]" , UpperCAmelCase : List[Any]="[CLS]" , UpperCAmelCase : Union[str, Any]="[MASK]" , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Any=None , **UpperCAmelCase : Optional[int] , ) -> str:
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
lowerCamelCase__ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase ) != tokenize_chinese_chars
):
lowerCamelCase__ : Optional[int] = getattr(UpperCAmelCase , normalizer_state.pop('type' ) )
lowerCamelCase__ : Optional[Any] = do_lower_case
lowerCamelCase__ : str = strip_accents
lowerCamelCase__ : Optional[Any] = tokenize_chinese_chars
lowerCamelCase__ : int = normalizer_class(**UpperCAmelCase )
lowerCamelCase__ : str = do_lower_case
def A_ ( self : Optional[int] , UpperCAmelCase : int , **UpperCAmelCase : int ) -> List[Any]:
lowerCamelCase__ : List[Any] = PaddingStrategy.MAX_LENGTH
lowerCamelCase__ : Optional[int] = text
lowerCamelCase__ : Dict = kwargs.pop('text_pair' , UpperCAmelCase )
lowerCamelCase__ : List[Any] = kwargs.pop('return_tensors' , UpperCAmelCase )
lowerCamelCase__ : List[Any] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCAmelCase ):
if batch_text_pair is not None:
lowerCamelCase__ : Tuple = batch_text_pair[idx]
else:
lowerCamelCase__ : Dict = None
lowerCamelCase__ : Optional[int] = super().__call__(UpperCAmelCase , UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase__ : Any = encoded_candidates.get('input_ids' )
lowerCamelCase__ : Union[str, Any] = encoded_candidates.get('attention_mask' )
lowerCamelCase__ : Tuple = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCAmelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCAmelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCAmelCase )
lowerCamelCase__ : int = {key: item for key, item in output_data.items() if len(item ) != 0}
return BatchEncoding(UpperCAmelCase , tensor_type=UpperCAmelCase )
def A_ ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=None ) -> List[str]:
lowerCamelCase__ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b:
output += token_ids_b + [self.sep_token_id]
return output
def A_ ( self : Tuple , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
lowerCamelCase__ : List[Any] = [self.sep_token_id]
lowerCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def A_ ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
lowerCamelCase__ : int = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
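# Usage sketch for the candidate-batch encoder above (published in
# transformers as `RealmTokenizerFast.batch_encode_candidates`; model name
# and texts are illustrative):
#
#   tokenizer = RealmTokenizerFast.from_pretrained('google/realm-cc-news-pretrained-encoder')
#   batch = tokenizer.batch_encode_candidates(
#       [['Hello world!', 'Nice to meet you!']], max_length=10, return_tensors='pt'
#   )
#   print(batch['input_ids'].shape)  # (batch_size, num_candidates, max_length)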
from __future__ import annotations
import math
_UpperCAmelCase : Any = """2020.9.26"""
_UpperCAmelCase : Optional[int] = """xcodz-dot, cclaus, dhruvmanila"""
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> tuple[float, float]:
if not all(isinstance(_UpperCAmelCase , (float, int) ) for val in locals().values() ):
lowerCamelCase__ : Any = F"""Input values must either be float or int: {list(locals().values() )}"""
raise TypeError(_UpperCAmelCase )
lowerCamelCase__ : str = ((x * distance) / (z + distance)) * scale
lowerCamelCase__ : Union[str, Any] = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> tuple[float, float, float]:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError('Axis must be a str' )
lowerCamelCase__ : List[str] = locals()
del input_variables["axis"]
if not all(isinstance(_UpperCAmelCase , (float, int) ) for val in input_variables.values() ):
lowerCamelCase__ : str = (
'Input values except axis must either be float or int: '
F"""{list(input_variables.values() )}"""
)
raise TypeError(_UpperCAmelCase )
lowerCamelCase__ : Any = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
lowerCamelCase__ : Dict = x * math.cos(_UpperCAmelCase ) - y * math.sin(_UpperCAmelCase )
lowerCamelCase__ : Dict = y * math.cos(_UpperCAmelCase ) + x * math.sin(_UpperCAmelCase )
lowerCamelCase__ : int = z
elif axis == "x":
lowerCamelCase__ : Dict = y * math.cos(_UpperCAmelCase ) - z * math.sin(_UpperCAmelCase )
lowerCamelCase__ : int = z * math.cos(_UpperCAmelCase ) + y * math.sin(_UpperCAmelCase )
lowerCamelCase__ : str = x
elif axis == "y":
lowerCamelCase__ : List[str] = x * math.cos(_UpperCAmelCase ) - z * math.sin(_UpperCAmelCase )
lowerCamelCase__ : int = z * math.cos(_UpperCAmelCase ) + x * math.sin(_UpperCAmelCase )
lowerCamelCase__ : Any = y
else:
raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
_UpperCAmelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass(frozen=__UpperCamelCase )
class lowerCAmelCase :
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
@dataclass(frozen=__UpperCamelCase )
class lowerCAmelCase :
UpperCAmelCase__ = 42
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = 42
def __init__( self : int , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : List[str]=False , UpperCAmelCase : bool = False , ) -> List[str]:
lowerCamelCase__ : int = hans_processors[task]()
lowerCamelCase__ : Optional[Any] = os.path.join(
UpperCAmelCase , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(UpperCAmelCase ) , UpperCAmelCase , ) , )
lowerCamelCase__ : int = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = label_list[2], label_list[1]
lowerCamelCase__ : List[str] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ : str = cached_features_file + '.lock'
with FileLock(UpperCAmelCase ):
if os.path.exists(UpperCAmelCase ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
lowerCamelCase__ : int = torch.load(UpperCAmelCase )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
lowerCamelCase__ : str = (
processor.get_dev_examples(UpperCAmelCase ) if evaluate else processor.get_train_examples(UpperCAmelCase )
)
logger.info('Training examples: %s' , len(UpperCAmelCase ) )
lowerCamelCase__ : Dict = hans_convert_examples_to_features(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
logger.info('Saving features into cached file %s' , UpperCAmelCase )
torch.save(self.features , UpperCAmelCase )
def __len__( self : Optional[int] ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : Tuple , UpperCAmelCase : Dict ) -> InputFeatures:
return self.features[i]
def A_ ( self : int ) -> int:
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase :
UpperCAmelCase__ = 42
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = 128 , UpperCAmelCase : Any=False , UpperCAmelCase : bool = False , ) -> Union[str, Any]:
lowerCamelCase__ : Any = hans_processors[task]()
lowerCamelCase__ : Optional[Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ : str = label_list[2], label_list[1]
lowerCamelCase__ : Optional[int] = label_list
lowerCamelCase__ : int = processor.get_dev_examples(UpperCAmelCase ) if evaluate else processor.get_train_examples(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = hans_convert_examples_to_features(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(UpperCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowerCamelCase__ : Optional[int] = tf.data.Dataset.from_generator(
UpperCAmelCase , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def A_ ( self : Any ) -> Any:
return self.dataset
def __len__( self : Tuple ) -> int:
return len(self.features )
def __getitem__( self : List[str] , UpperCAmelCase : Any ) -> InputFeatures:
return self.features[i]
def A_ ( self : Dict ) -> str:
return self.label_list
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : int , UpperCAmelCase : List[Any] ) -> int:
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase , 'heuristics_train_set.txt' ) ) , 'train' )
def A_ ( self : Any , UpperCAmelCase : int ) -> List[Any]:
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def A_ ( self : Any ) -> List[Any]:
return ["contradiction", "entailment", "neutral"]
def A_ ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> List[str]:
lowerCamelCase__ : List[str] = []
for i, line in enumerate(UpperCAmelCase ):
if i == 0:
continue
lowerCamelCase__ : Tuple = '%s-%s' % (set_type, line[0])
lowerCamelCase__ : str = line[5]
lowerCamelCase__ : Dict = line[6]
lowerCamelCase__ : int = line[7][2:] if line[7].startswith('ex' ) else line[7]
lowerCamelCase__ : Dict = line[0]
examples.append(InputExample(guid=UpperCAmelCase , text_a=UpperCAmelCase , text_b=UpperCAmelCase , label=UpperCAmelCase , pairID=UpperCAmelCase ) )
return examples
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[int]:
lowerCamelCase__ : int = {label: i for i, label in enumerate(_UpperCAmelCase )}
lowerCamelCase__ : List[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(_UpperCAmelCase ) , desc='convert examples to features' ):
if ex_index % 1_0000 == 0:
logger.info('Writing example %d' % (ex_index) )
lowerCamelCase__ : List[Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=_UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , truncation=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , )
lowerCamelCase__ : List[str] = label_map[example.label] if example.label in label_map else 0
lowerCamelCase__ : Optional[int] = int(example.pairID )
features.append(InputFeatures(**_UpperCAmelCase , label=_UpperCAmelCase , pairID=_UpperCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
_UpperCAmelCase : str = {
"""hans""": 3,
}
hans_processors = {
"""hans""": HansProcessor,
}
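
# Illustrative usage (an editor's sketch, not part of the original module): wiring the
# processor and converter above together. The data directory and the tokenizer
# checkpoint below are assumptions, not values taken from this file.
def _example_hans_features():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    processor = HansProcessor()
    examples = processor.get_dev_examples("/path/to/hans")  # hypothetical data directory
    return hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)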
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase : str = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint , hf_model , config ):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None ):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint['model']['generator'] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
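# Example invocation (an editor's sketch; every path below is a placeholder):
#
#   python convert_hifigan.py \
#       --checkpoint_path /path/to/generator_checkpoint.pt \
#       --stats_path /path/to/stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan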
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = """▁"""
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = BertGenerationTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = True
def A_ ( self : List[Any] ) -> List[str]:
super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Optional[Any] ) -> Dict:
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def A_ ( self : List[str] ) -> Optional[int]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
        self.assertEqual(len(vocab_keys ) , 1002 )
def A_ ( self : List[Any] ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def A_ ( self : Union[str, Any] ) -> List[Any]:
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer(self ):
        return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def A_ ( self : Optional[int] ) -> List[str]:
        symbols = 'Hello World!'
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def A_ ( self : Optional[Any] ) -> str:
        symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
def A_ ( self : int ) -> Optional[Any]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ' '.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='pt' , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=False )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
def A_ ( self : Optional[int] ) -> List[Any]:
# fmt: off
lowerCamelCase__ : Any = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
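
    # Illustrative round trip (an editor's sketch, not one of the tests above): the
    # integration checkpoint exercised in this class can also be driven directly.
    def _example_round_trip(self ):
        ids = self.big_tokenizer.encode('Hello World!' )
        return self.big_tokenizer.decode(ids )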
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = BertJapaneseTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = True
def A_ ( self : Optional[int] ) -> Dict:
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts(self , tokenizer ):
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
        return input_text, output_text
    def get_clean_sequence(self , tokenizer ):
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def A_ ( self : Dict ) -> List[str]:
pass # TODO add if relevant
def A_ ( self : List[Any] ) -> Dict:
pass # TODO add if relevant
def A_ ( self : Any ) -> Dict:
pass # TODO add if relevant
def A_ ( self : Any ) -> Optional[int]:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
        self.assertListEqual(tokens , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def A_ ( self : Any ) -> Tuple:
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
        self.assertIsNotNone(tokenizer )
        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        filename = os.path.join(self.tmpdirname , 'tokenizer.bin' )
        with open(filename , 'wb' ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , 'rb' ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
def A_ ( self : List[Any] ) -> List[Any]:
        tokenizer = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def A_ ( self : Union[str, Any] ) -> Any:
try:
            tokenizer = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def A_ ( self : int ) -> Union[str, Any]:
try:
            tokenizer = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def A_ ( self : str ) -> Optional[int]:
        tokenizer = MecabTokenizer(do_lower_case=True , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def A_ ( self : int ) -> List[str]:
try:
lowerCamelCase__ : Optional[int] = MecabTokenizer(
do_lower_case=UpperCAmelCase , normalize_text=UpperCAmelCase , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def A_ ( self : Dict ) -> Tuple:
        tokenizer = MecabTokenizer(normalize_text=False , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def A_ ( self : List[Any] ) -> Optional[Any]:
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
        self.assertIsNotNone(tokenizer )
        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        filename = os.path.join(self.tmpdirname , 'tokenizer.bin' )
        with open(filename , 'wb' ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , 'rb' ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
@require_sudachi
def A_ ( self : Dict ) -> int:
        tokenizer = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def A_ ( self : Dict ) -> Optional[Any]:
        tokenizer = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def A_ ( self : Any ) -> int:
        tokenizer = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def A_ ( self : Any ) -> Union[str, Any]:
        tokenizer = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def A_ ( self : str ) -> Optional[int]:
        tokenizer = SudachiTokenizer(do_lower_case=True , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def A_ ( self : Union[str, Any] ) -> Tuple:
        tokenizer = SudachiTokenizer(normalize_text=False , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def A_ ( self : List[Any] ) -> Tuple:
        tokenizer = SudachiTokenizer(trim_whitespace=True , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def A_ ( self : str ) -> List[str]:
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
        self.assertIsNotNone(tokenizer )
        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        filename = os.path.join(self.tmpdirname , 'tokenizer.bin' )
        with open(filename , 'wb' ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , 'rb' ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
@require_jumanpp
def A_ ( self : Tuple ) -> Tuple:
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def A_ ( self : str ) -> Optional[Any]:
        tokenizer = JumanppTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def A_ ( self : Tuple ) -> List[str]:
        tokenizer = JumanppTokenizer(normalize_text=False )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def A_ ( self : str ) -> Dict:
        tokenizer = JumanppTokenizer(trim_whitespace=True )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def A_ ( self : Any ) -> Any:
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def A_ ( self : List[Any] ) -> int:
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def A_ ( self : Tuple ) -> str:
        tokenizer = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
        subword_tokenizer = tokenizer.subword_tokenizer
        tokens = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
        self.assertListEqual(tokens , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
        tokens = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
        self.assertListEqual(tokens , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def A_ ( self : Dict ) -> List[Any]:
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
        text = tokenizer.encode('ありがとう。' , add_special_tokens=False )
        text_2 = tokenizer.encode('どういたしまして。' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = BertJapaneseTokenizer
UpperCAmelCase__ = False
def A_ ( self : Dict ) -> Any:
super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer(self , **kwargs ):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
        return input_text, output_text
def A_ ( self : Optional[Any] ) -> List[Any]:
pass # TODO add if relevant
def A_ ( self : Tuple ) -> Union[str, Any]:
pass # TODO add if relevant
def A_ ( self : Optional[Any] ) -> Optional[int]:
pass # TODO add if relevant
def A_ ( self : Tuple ) -> Tuple:
        tokenizer = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
        tokens = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
        self.assertListEqual(
            tokens , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def A_ ( self : Dict ) -> Any:
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def A_ ( self : Any ) -> str:
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
        text = tokenizer.encode('ありがとう。' , add_special_tokens=False )
        text_2 = tokenizer.encode('どういたしまして。' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
def A_ ( self : List[str] ) -> Dict:
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID )
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer )
class lowerCAmelCase ( unittest.TestCase ):
def A_ ( self : Tuple ) -> Optional[int]:
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        with self.assertLogs('transformers' , level='WARNING' ) as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
        EXAMPLE_BERT_ID = 'bert-base-cased'
        with self.assertLogs('transformers' , level='WARNING' ) as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
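
# Illustrative usage (an editor's sketch, separate from the tests above): constructing
# the MeCab-backed tokenizer directly; downloading the checkpoint is assumed to be
# acceptable in the environment.
def _example_japanese_tokenization():
    tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
    return tokenizer.tokenize("こんにちは、世界。")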
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_UpperCAmelCase : str = pytest.mark.integration
@require_faiss
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : List[Any] ) -> Union[str, Any]:
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def A_ ( self : Optional[Any] ) -> Optional[int]:
import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores, examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
        dset.drop_index('vecs' )
def A_ ( self : Union[str, Any] ) -> int:
import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores, examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : List[str] ) -> Tuple:
import faiss
        dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
        scores, examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : Any ) -> Optional[Any]:
        dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.float32 ) ) )
def A_ ( self : Dict ) -> Dict:
from elasticsearch import Elasticsearch
        dset: Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30 )
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename' , es_client=es_client )
            scores, examples = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : Any ) -> Dict:
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores, indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores, total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
def A_ ( self : List[Any] ) -> List[Any]:
import faiss
        index = FaissIndex(string_factory='Flat' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory='LSH' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            index = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def A_ ( self : List[str] ) -> Optional[int]:
import faiss
        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def A_ ( self : Any ) -> Optional[int]:
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores, indices = index.search(query )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs ):
import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = 'index.faiss'
    path = f"mock://{index_name}"
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores, indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : Dict ) -> List[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(['foo', 'bar', 'foobar'] )
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
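
# Illustrative usage (an editor's sketch, separate from the tests above): the same
# add_faiss_index / get_nearest_examples pattern on a tiny in-memory dataset.
def _example_faiss_search():
    ds = Dataset.from_dict({"text": ["a", "b", "c"], "vecs": np.eye(3, dtype=np.float32).tolist()})
    ds.add_faiss_index(column="vecs")
    scores, examples = ds.get_nearest_examples("vecs", np.array([1.0, 0.0, 0.0], dtype=np.float32), k=1)
    return scores, examples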
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
rotor2 = """FOBHMDKEXQNRAULPGSJVTYICZW"""
rotor3 = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
reflector = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
rotor4 = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
rotor5 = """SGLCPQWZHKXAREONTFBVIYJUDM"""
rotor6 = """HVSICLTYKQUBXDWAJZOMFGPREN"""
rotor7 = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
rotor8 = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
rotor9 = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")  # spaces are not plugboard symbols

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Create the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
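# Worked example: _plugboard("PICTURES") pairs P<->I, C<->T, U<->R and E<->S, and the
# returned dictionary contains both directions of every swap.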
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    # rotor positions are 1-based while string indexing is 0-based
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = """This is my Python script that emulates the Enigma machine from WWII."""
    rotor_pos = (1, 1, 1)
    pb = """pictures"""
    rotor_sel = (rotor2, rotor4, rotor8)  # any three distinct rotors defined above work here
    en = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    ParallelBackendConfig.backend_name = backend_name

    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
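
# Illustrative usage (an editor's sketch): routing the helpers above through a joblib
# backend. The "spark" backend additionally requires the optional joblibspark package.
def _example_parallel_backend():
    with parallel_backend("spark"):
        # any datasets operation that goes through parallel_map runs on Spark here
        pass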
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = """M-CLIP"""

    def __init__(self , transformerDimSize=1024 , imageDimSize=768 , **kwargs ):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self , config , *args , **kwargs ):
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )

    def forward(self , input_ids , attention_mask ):
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        embs2 = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs2 ), embs
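
# Illustrative forward pass (an editor's sketch): the model expects XLM-R style inputs.
# The tokenizer checkpoint and the vocab_size passed to the config are assumptions made
# so that the randomly initialised embedding table covers the tokenizer's ids.
def _example_mclip_forward():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
    config = MCLIPConfig(vocab_size=tokenizer.vocab_size)
    model = MultilingualCLIP(config)
    batch = tokenizer(["a photo of a cat"], return_tensors="pt", padding=True)
    projected, embeddings = model(batch["input_ids"], batch["attention_mask"])
    return projected.shape  # (batch_size, numDims)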
|
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError('factorial() not defined for negative values')
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)
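# Worked example: with 3 nodes there are catalan_number(3) = 5 binary search trees and
# binary_tree_count(3) = 5 * 3! = 30 distinct labeled binary trees.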
if __name__ == "__main__":
    node_count = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
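# Worked examples from the Project Euler 115 statement: solution(3) == 30 and
# solution(10) == 57; with the default minimum block length of 50, solution()
# evaluates to 168.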
if __name__ == "__main__":
print(F"""{solution() = }""")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
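    # With the lazy module installed in sys.modules, an import such as
    # `from ... import XLMRobertaXLConfig` defers the torch-dependent submodule
    # imports until the attribute is first accessed.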
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
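

# Illustrative usage (an editor's sketch): a small graph that contains a Hamiltonian
# cycle; starting from vertex 0 the backtracking search returns [0, 1, 2, 4, 3, 0].
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))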
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
_UpperCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = ["""input_ids""", """attention_mask"""]
UpperCAmelCase__ = NllbTokenizer
UpperCAmelCase__ = []
UpperCAmelCase__ = []
def __init__( self : Tuple , UpperCAmelCase : int=None , UpperCAmelCase : Any=None , UpperCAmelCase : str="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : str="</s>" , UpperCAmelCase : Tuple="<s>" , UpperCAmelCase : Optional[Any]="<unk>" , UpperCAmelCase : List[str]="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : Tuple=None , UpperCAmelCase : int=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Any=False , **UpperCAmelCase : Optional[int] , ) -> Tuple:
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ : List[Any] = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
lowerCamelCase__ : Union[str, Any] = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase , tokenizer_file=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , src_lang=UpperCAmelCase , tgt_lang=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , legacy_behaviour=UpperCAmelCase , **UpperCAmelCase , )
lowerCamelCase__ : List[Any] = vocab_file
lowerCamelCase__ : Dict = False if not self.vocab_file else True
lowerCamelCase__ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ : str = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ : int = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ : List[Any] = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A_ ( self : int ) -> str:
return self._src_lang
@src_lang.setter
def A_ ( self : List[Any] , UpperCAmelCase : str ) -> None:
lowerCamelCase__ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A_ ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A_ ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
lowerCamelCase__ : Dict = [self.sep_token_id]
lowerCamelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A_ ( self : int , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Optional[str] , UpperCAmelCase : Optional[str] , **UpperCAmelCase : List[str] ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ : Optional[int] = src_lang
lowerCamelCase__ : Optional[int] = self(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = tgt_lang_id
return inputs
def A_ ( self : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : str = "eng_Latn" , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "fra_Latn" , **UpperCAmelCase : Dict , ) -> BatchEncoding:
lowerCamelCase__ : Any = src_lang
lowerCamelCase__ : int = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Union[str, Any] ) -> Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def A_ ( self : Any ) -> Union[str, Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A_ ( self : str , UpperCAmelCase : Optional[Any] ) -> None:
lowerCamelCase__ : int = self.convert_tokens_to_ids(UpperCAmelCase )
if self.legacy_behaviour:
lowerCamelCase__ : int = []
lowerCamelCase__ : str = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ : int = [self.cur_lang_code]
lowerCamelCase__ : Tuple = [self.eos_token_id]
lowerCamelCase__ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A_ ( self : int , UpperCAmelCase : str ) -> None:
lowerCamelCase__ : Union[str, Any] = self.convert_tokens_to_ids(UpperCAmelCase )
if self.legacy_behaviour:
lowerCamelCase__ : Dict = []
lowerCamelCase__ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ : Any = [self.cur_lang_code]
lowerCamelCase__ : Optional[Any] = [self.eos_token_id]
lowerCamelCase__ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A_ ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
lowerCamelCase__ : int = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
return (out_vocab_file,)
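# Usage sketch (illustrative; not part of the original file): translating with
# the fast NLLB tokenizer defined above. The checkpoint name and language codes
# come from the maps at the top of this file.
#
# from transformers import NllbTokenizerFast
# tokenizer = NllbTokenizerFast.from_pretrained(
#     'facebook/nllb-200-distilled-600M', src_lang='eng_Latn', tgt_lang='fra_Latn'
# )
# inputs = tokenizer('Hello, world!', return_tensors='pt')
# # the input_ids carry the eng_Latn code (as prefix, or as suffix under legacy_behaviour)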
| 50
| 1
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
# ===== initialization =====
lowerCamelCase__ : Union[str, Any] = Mock()
lowerCamelCase__ : Any = conn, Mock()
lowerCamelCase__ : int = iter([1, None] )
lowerCamelCase__ : Optional[int] = lambda _UpperCAmelCase : next(_UpperCAmelCase )
# ===== invoke =====
send_file(filename='mytext.txt' , testing=_UpperCAmelCase )
    # ===== assertions =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
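# The mocked assertions above pin down the socket protocol expected of
# send_file: bind -> listen -> accept -> recv (one handshake message) ->
# read the file chunk by chunk -> send -> close the connection, then
# shutdown and close the listening socket.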
| 50
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Optional[int] = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : List[str] = emb.weight.shape
lowerCamelCase__ : Tuple = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
lowerCamelCase__ : Dict = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Tuple = torch.load(_UpperCAmelCase , map_location='cpu' )
lowerCamelCase__ : List[str] = mam_aaa['args'] or mam_aaa['cfg']['model']
lowerCamelCase__ : Optional[int] = mam_aaa['model']
remove_ignore_keys_(_UpperCAmelCase )
lowerCamelCase__ : str = state_dict['encoder.embed_tokens.weight'].shape[0]
lowerCamelCase__ : Union[str, Any] = MaMaaaConfig(
vocab_size=_UpperCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
lowerCamelCase__ : Optional[Any] = state_dict['decoder.embed_tokens.weight']
lowerCamelCase__ : Union[str, Any] = MaMaaaForConditionalGeneration(_UpperCAmelCase )
model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
lowerCamelCase__ : List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_UpperCAmelCase : str = parser.parse_args()
    _UpperCAmelCase : Optional[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
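# Example invocation (hypothetical script name and paths; the script takes the
# two positional arguments defined above):
#   python convert_m2m100_checkpoint.py /path/to/m2m100.pt ./m2m100-hf
# The dump folder can then be reloaded with
# MaMaaaForConditionalGeneration.from_pretrained('./m2m100-hf').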
| 50
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
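# Illustrative note (not in the original file): with the lazy module in place,
# importing the package stays cheap, and a line such as
#   from transformers.models.speech_to_text import Speech2TextConfig
# only triggers the import of configuration_speech_to_text on first access.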
| 0
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
lowerCamelCase__ : str = set()
lowerCamelCase__ : Any = []
def parse_line(_UpperCAmelCase ):
for line in fp:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase__ : Any = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(_UpperCAmelCase ) > 0:
lowerCamelCase__ : str = '\n'.join(_UpperCAmelCase )
# Only keep the warnings specified in `targets`
if any(F""": {x}: """ in warning for x in targets ):
selected_warnings.add(_UpperCAmelCase )
buffer.clear()
continue
else:
lowerCamelCase__ : List[str] = line.strip()
buffer.append(_UpperCAmelCase )
if from_gh:
for filename in os.listdir(_UpperCAmelCase ):
lowerCamelCase__ : Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not os.path.isdir(_UpperCAmelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(_UpperCAmelCase ) as fp:
parse_line(_UpperCAmelCase )
else:
try:
with zipfile.ZipFile(_UpperCAmelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(_UpperCAmelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(_UpperCAmelCase ) as fp:
parse_line(_UpperCAmelCase )
except Exception:
logger.warning(
F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Tuple = set()
lowerCamelCase__ : Optional[int] = [os.path.join(_UpperCAmelCase , _UpperCAmelCase ) for p in os.listdir(_UpperCAmelCase ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(_UpperCAmelCase , _UpperCAmelCase ) )
return selected_warnings
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Tuple:
return values.split(',' )
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
_UpperCAmelCase : Dict = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
_UpperCAmelCase : Union[str, Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
_UpperCAmelCase : Dict = extract_warnings(args.output_dir, args.targets)
_UpperCAmelCase : Optional[Any] = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
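# Example invocation (hypothetical script name, run id, and token):
#   python extract_warnings.py --workflow_run_id 1234567890 \
#       --output_dir ./ci_warnings --token $GITHUB_TOKEN \
#       --targets DeprecationWarning,FutureWarning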
| 50
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __A ( unittest.TestCase ):
def _lowercase (self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase (self : str ):
UpperCAmelCase_ = 1
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def _lowercase (self : int ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def _lowercase (self : Any ):
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowercase (self : Optional[Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
return CLIPTextModel(__a )
def _lowercase (self : Any ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase_ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape[0] == 2
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowercase (self : str ):
UpperCAmelCase_ = self.dummy_cond_unet_upscale
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase_ = unet.half()
UpperCAmelCase_ = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images
UpperCAmelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def _lowercase (self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def _lowercase (self : Tuple ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _lowercase (self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = "a cat sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , )
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
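# Minimal end-to-end sketch mirroring the slow tests above (assumes a CUDA GPU
# and Hub access; illustrative, not part of the original test file):
#
# import torch
# from diffusers import StableDiffusionUpscalePipeline
# from diffusers.utils import load_image
#
# pipe = StableDiffusionUpscalePipeline.from_pretrained(
#     "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
# ).to("cuda")
# low_res = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images"
#     "/resolve/main/sd2-upscale/low_res_cat.png"
# )
# upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]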
| 1
|
import flax.linen as nn
import jax
import jax.numpy as jnp
class lowerCAmelCase ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = jnp.floataa
def A_ ( self : Any ) -> Any:
lowerCamelCase__ : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : int , UpperCAmelCase : Dict ) -> Optional[Any]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = hidden_states.shape
lowerCamelCase__ : Union[str, Any] = jax.image.resize(
UpperCAmelCase , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
lowerCamelCase__ : Optional[Any] = self.conv(UpperCAmelCase )
return hidden_states
class lowerCAmelCase ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = jnp.floataa
def A_ ( self : List[str] ) -> int:
lowerCamelCase__ : Tuple = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : str , UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
lowerCamelCase__ : Optional[Any] = self.conv(UpperCAmelCase )
return hidden_states
class lowerCAmelCase ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = None
UpperCAmelCase__ = 0.0
UpperCAmelCase__ = None
UpperCAmelCase__ = jnp.floataa
def A_ ( self : List[str] ) -> Union[str, Any]:
lowerCamelCase__ : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels
lowerCamelCase__ : Tuple = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCamelCase__ : int = nn.Conv(
UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCamelCase__ : Union[str, Any] = nn.Dense(UpperCAmelCase , dtype=self.dtype )
lowerCamelCase__ : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCamelCase__ : List[Any] = nn.Dropout(self.dropout_prob )
lowerCamelCase__ : Tuple = nn.Conv(
UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCamelCase__ : Optional[Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
lowerCamelCase__ : Union[str, Any] = None
if use_nin_shortcut:
lowerCamelCase__ : Dict = nn.Conv(
UpperCAmelCase , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=True ) -> Optional[int]:
lowerCamelCase__ : Union[str, Any] = hidden_states
lowerCamelCase__ : List[Any] = self.norma(UpperCAmelCase )
lowerCamelCase__ : List[Any] = nn.swish(UpperCAmelCase )
lowerCamelCase__ : Any = self.conva(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.time_emb_proj(nn.swish(UpperCAmelCase ) )
lowerCamelCase__ : List[str] = jnp.expand_dims(jnp.expand_dims(UpperCAmelCase , 1 ) , 1 )
lowerCamelCase__ : List[str] = hidden_states + temb
lowerCamelCase__ : Optional[Any] = self.norma(UpperCAmelCase )
lowerCamelCase__ : List[str] = nn.swish(UpperCAmelCase )
lowerCamelCase__ : Optional[int] = self.dropout(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : str = self.conva(UpperCAmelCase )
if self.conv_shortcut is not None:
lowerCamelCase__ : Dict = self.conv_shortcut(UpperCAmelCase )
return hidden_states + residual
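# Self-contained sketch of the upsample pattern used above (nearest-neighbour
# resize followed by a 3x3 same-padding convolution). Written independently of
# the renamed classes in this file, so the name below is illustrative only.
class UpsampleSketch(nn.Module):
    out_channels: int

    @nn.compact
    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # double the spatial resolution, then convolve to mix channels
        hidden_states = jax.image.resize(
            hidden_states, (batch, height * 2, width * 2, channels), method='nearest'
        )
        return nn.Conv(self.out_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)))(hidden_states)

# e.g. UpsampleSketch(64).init_with_output(jax.random.PRNGKey(0), jnp.zeros((1, 32, 32, 64)))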
| 50
| 0
|
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class __lowerCAmelCase :
'''simple docstring'''
def __init__(self : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Tuple=16 , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : List[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : str=True , UpperCamelCase : Tuple=False , UpperCamelCase : str=True , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Any=4 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Tuple=30 , UpperCamelCase : str=0 , UpperCamelCase : Tuple=1 , UpperCamelCase : List[Any]=2 , UpperCamelCase : str=None , ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = decoder_seq_length
# For common tests
lowercase__ = self.decoder_seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_model
lowercase__ = decoder_layers
lowercase__ = decoder_layers
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_attention_heads
lowercase__ = decoder_attention_heads
lowercase__ = eos_token_id
lowercase__ = bos_token_id
lowercase__ = pad_token_id
lowercase__ = decoder_start_token_id
lowercase__ = use_cache
lowercase__ = max_position_embeddings
lowercase__ = None
lowercase__ = decoder_seq_length
lowercase__ = 2
lowercase__ = 1
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
lowercase__ = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def UpperCamelCase__ (self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , ):
'''simple docstring'''
lowercase__ = True
lowercase__ = TrOCRDecoder(config=UpperCamelCase ).to(UpperCamelCase ).eval()
lowercase__ = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase )
lowercase__ = model(UpperCamelCase )
lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 )
lowercase__ = outputs['''past_key_values''']
        # create a hypothetical next token and extend next_input_ids with it
lowercase__ = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the new token to next_input_ids
lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase__ = model(UpperCamelCase )['''last_hidden_state''']
lowercase__ = model(UpperCamelCase , past_key_values=UpperCamelCase )['''last_hidden_state''']
# select random slice
lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
lowercase__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = config_and_inputs
lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase (lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCAmelCase__ : List[Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ : Optional[Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : List[str] = False
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
lowercase__ = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCamelCase )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
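# Usage sketch for the standalone decoder exercised above (illustrative; the
# small sizes are chosen for the example, not taken from a released checkpoint):
#
# import torch
# from transformers import TrOCRConfig, TrOCRForCausalLM
# config = TrOCRConfig(vocab_size=99, d_model=32, decoder_layers=2,
#                      decoder_attention_heads=4, decoder_ffn_dim=64)
# model = TrOCRForCausalLM(config)
# logits = model(input_ids=torch.tensor([[1, 5, 7]])).logits  # shape (1, 3, 99)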
| 2
|
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> set:
lowerCamelCase__ : Optional[Any] = set()
# edges = list of graph's edges
lowerCamelCase__ : List[str] = get_edges(_UpperCAmelCase )
    # While there are still edges left, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove every edge adjacent to from_node or to_node
while edges:
lowerCamelCase__ , lowerCamelCase__ : str = edges.pop()
chosen_vertices.add(_UpperCAmelCase )
chosen_vertices.add(_UpperCAmelCase )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(_UpperCAmelCase )
return chosen_vertices
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> set:
lowerCamelCase__ : Union[str, Any] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 50
| 0
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
lowercase : Dict = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
lowercase : Union[str, Any] = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
lowercase : Optional[int] = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
return float((preds == labels).mean() )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : List[Any] = simple_accuracy(snake_case__ , snake_case__ )
A : List[Any] = float(fa_score(y_true=snake_case__ , y_pred=snake_case__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Any = float(pearsonr(snake_case__ , snake_case__ )[0] )
A : str = float(spearmanr(snake_case__ , snake_case__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}
elif self.config_name == "stsb":
return pearson_and_spearman(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 3
|
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
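# e.g. the check above gives True for 29 and False for 33 (33 = 3 * 11)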
_UpperCAmelCase : Any = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> list[int]:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
lowerCamelCase__ : int = []
for num in range(len(_UpperCAmelCase ) ):
lowerCamelCase__ : Union[str, Any] = 0
while 2 * i * i <= odd_composites[num]:
lowerCamelCase__ : Dict = odd_composites[num] - 2 * i * i
if is_prime(_UpperCAmelCase ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(_UpperCAmelCase ) == n:
return list_nums
return []
def SCREAMING_SNAKE_CASE ( ) -> int:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 50
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[str] = '''dpr'''
def __init__( self : List[Any] , UpperCAmelCase__ : Tuple=3_0_5_2_2 , UpperCAmelCase__ : Tuple=7_6_8 , UpperCAmelCase__ : str=1_2 , UpperCAmelCase__ : str=1_2 , UpperCAmelCase__ : Tuple=3_0_7_2 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[int]=5_1_2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Union[str, Any]=0.02 , UpperCAmelCase__ : Dict=1E-12 , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Union[str, Any]="absolute" , UpperCAmelCase__ : int = 0 , **UpperCAmelCase__ : Union[str, Any] , ) -> List[str]:
super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = projection_dim
lowerCAmelCase = position_embedding_type
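# Usage sketch (the class above is DPRConfig under its renamed identifier;
# illustrative only):
#
# from transformers import DPRConfig, DPRContextEncoder
# config = DPRConfig(projection_dim=0)  # 0 keeps the raw pooled output
# model = DPRContextEncoder(config)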
| 4
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Dict = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[str]:
lowerCamelCase__ : str = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
lowerCamelCase__ : Optional[Any] = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
lowerCamelCase__ : List[str] = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCamelCase__ : Dict = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCamelCase__ : Tuple = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_UpperCAmelCase )-1}""" )
if "norm" in key:
lowerCamelCase__ : str = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCamelCase__ : Dict = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
lowerCamelCase__ : str = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_UpperCAmelCase )-1}""" )
if "layer_norm1" in key:
lowerCamelCase__ : Optional[int] = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCamelCase__ : Optional[int] = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCamelCase__ : List[Any] = key[key.find('block' ) + len('block' )]
lowerCamelCase__ : int = key.replace(F"""block{idx}""" , F"""block.{int(_UpperCAmelCase )-1}""" )
if "attn.q" in key:
lowerCamelCase__ : Union[str, Any] = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCamelCase__ : Union[str, Any] = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCamelCase__ : Dict = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCamelCase__ : Dict = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCamelCase__ : Any = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCamelCase__ : Dict = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCamelCase__ : Tuple = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCamelCase__ : List[str] = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCamelCase__ : Optional[Any] = key[key.find('linear_c' ) + len('linear_c' )]
lowerCamelCase__ : Dict = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_UpperCAmelCase )-1}""" )
if "bot_conv" in key:
lowerCamelCase__ : str = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
lowerCamelCase__ : Union[str, Any] = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
lowerCamelCase__ : List[Any] = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
lowerCamelCase__ : Optional[int] = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
lowerCamelCase__ : Union[str, Any] = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
lowerCamelCase__ : List[Any] = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
lowerCamelCase__ : str = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
lowerCamelCase__ : Dict = key.replace('module.last_layer_depth' , 'head.head' )
lowerCamelCase__ : str = value
return new_state_dict
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCamelCase__ : Any = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
lowerCamelCase__ : Optional[Any] = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
lowerCamelCase__ : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
lowerCamelCase__ : Optional[int] = kv_bias[: config.hidden_sizes[i]]
lowerCamelCase__ : Any = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCamelCase__ : Dict = kv_bias[config.hidden_sizes[i] :]
def SCREAMING_SNAKE_CASE ( ) -> str:
lowerCamelCase__ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ : Tuple = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return image
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=None ) -> Optional[int]:
lowerCamelCase__ : str = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCamelCase__ : Union[str, Any] = GLPNImageProcessor()
# prepare image
lowerCamelCase__ : str = prepare_img()
lowerCamelCase__ : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
lowerCamelCase__ : Any = torch.load(_UpperCAmelCase , map_location=torch.device('cpu' ) )
# rename keys
lowerCamelCase__ : str = rename_keys(_UpperCAmelCase )
# key and value matrices need special treatment
read_in_k_v(_UpperCAmelCase , _UpperCAmelCase )
# create HuggingFace model and load state dict
lowerCamelCase__ : Dict = GLPNForDepthEstimation(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
# forward pass
lowerCamelCase__ : List[str] = model(_UpperCAmelCase )
lowerCamelCase__ : Tuple = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCamelCase__ : List[Any] = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
lowerCamelCase__ : List[str] = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
lowerCamelCase__ : Tuple = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=_UpperCAmelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_UpperCAmelCase , )
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
_UpperCAmelCase : int = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
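# Example invocation (hypothetical script name and checkpoint path):
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti-hf \
#       --model_name glpn-kitti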
| 50
| 0
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 5
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase :
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any]=99 , UpperCAmelCase : str=13 , UpperCAmelCase : List[str]=7 , UpperCAmelCase : str=9 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : str=True , UpperCAmelCase : Any=False , UpperCAmelCase : Union[str, Any]=32 , UpperCAmelCase : List[str]=5 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : Union[str, Any]=37 , UpperCAmelCase : int=8 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Any=0.0_0_2 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[Any]=None , ) -> Union[str, Any]:
lowerCamelCase__ : int = parent
lowerCamelCase__ : Any = batch_size
lowerCamelCase__ : Optional[int] = encoder_seq_length
lowerCamelCase__ : int = decoder_seq_length
# For common tests
lowerCamelCase__ : List[str] = self.decoder_seq_length
lowerCamelCase__ : Optional[int] = is_training
lowerCamelCase__ : List[Any] = use_attention_mask
lowerCamelCase__ : Optional[Any] = use_labels
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : str = d_ff
lowerCamelCase__ : Optional[Any] = relative_attention_num_buckets
lowerCamelCase__ : Any = dropout_rate
lowerCamelCase__ : Any = initializer_factor
lowerCamelCase__ : Union[str, Any] = eos_token_id
lowerCamelCase__ : List[str] = pad_token_id
lowerCamelCase__ : List[str] = decoder_start_token_id
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Optional[Any] = decoder_layers
def A_ ( self : List[Any] ) -> int:
return TaConfig.from_pretrained('google/umt5-base' )
def A_ ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : str=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[Any]=None , ) -> List[str]:
if attention_mask is None:
lowerCamelCase__ : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCamelCase__ : Optional[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCamelCase__ : int = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase )
if decoder_head_mask is None:
lowerCamelCase__ : Dict = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
if cross_attn_head_mask is None:
lowerCamelCase__ : Dict = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def A_ ( self : str ) -> List[str]:
lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCamelCase__ : List[str] = input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Union[str, Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Dict = self.get_config()
lowerCamelCase__ : Tuple = config.num_attention_heads
lowerCamelCase__ : Any = self.prepare_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return config, input_dict
def A_ ( self : Tuple ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def A_ ( self : Optional[int] ) -> List[str]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def A_ ( self : Union[str, Any] ) -> Dict:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def A_ ( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Dict , ) -> str:
lowerCamelCase__ : Dict = UMTaModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Optional[int] = model(
input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , )
lowerCamelCase__ : Any = model(input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase )
lowerCamelCase__ : Dict = result.last_hidden_state
lowerCamelCase__ : Any = result.past_key_values
lowerCamelCase__ : List[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def A_ ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , ) -> Optional[int]:
lowerCamelCase__ : List[Any] = UMTaModel(config=UpperCAmelCase ).get_decoder().to(UpperCAmelCase ).eval()
# first forward pass
lowerCamelCase__ : Tuple = model(UpperCAmelCase , use_cache=UpperCAmelCase )
lowerCamelCase__ : List[Any] = model(UpperCAmelCase )
lowerCamelCase__ : int = model(UpperCAmelCase , use_cache=UpperCAmelCase )
self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) )
self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) + 1 )
lowerCamelCase__ , lowerCamelCase__ : Dict = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
lowerCamelCase__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append next tokens to input_ids
lowerCamelCase__ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase__ : List[str] = model(UpperCAmelCase )['last_hidden_state']
lowerCamelCase__ : str = model(UpperCAmelCase , past_key_values=UpperCAmelCase )['last_hidden_state']
# select random slice
lowerCamelCase__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase__ : Tuple = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCamelCase__ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def A_ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = UMTaModel(config=UpperCAmelCase ).to(UpperCAmelCase ).half().eval()
lowerCamelCase__ : Optional[int] = model(**UpperCAmelCase )['last_hidden_state']
self.parent.assertFalse(torch.isnan(UpperCAmelCase ).any().item() )
@require_torch
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
UpperCAmelCase__ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCAmelCase__ = [0.8, 0.9]
def A_ ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def A_ ( self : Tuple ) -> int:
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Tuple = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=UpperCAmelCase , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def A_ ( self : Tuple ) -> Optional[Any]:
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase )
def A_ ( self : List[Any] ) -> str:
lowerCamelCase__ : int = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Any = config_and_inputs[0]
lowerCamelCase__ : Any = UMTaForConditionalGeneration(UpperCAmelCase ).eval()
model.to(UpperCAmelCase )
lowerCamelCase__ : Tuple = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ),
}
for attn_name, (name, mask) in zip(UpperCAmelCase , head_masking.items() ):
lowerCamelCase__ : Union[str, Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required by the T5 model when head_mask is specified
if name == "head_mask":
lowerCamelCase__ : Union[str, Any] = torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCAmelCase )
lowerCamelCase__ : Tuple = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase , return_dict_in_generate=UpperCAmelCase , **UpperCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowerCamelCase__ : Union[str, Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def A_ ( self : Any ) -> int:
lowerCamelCase__ : Optional[Any] = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=UpperCAmelCase ).to(UpperCAmelCase )
lowerCamelCase__ : List[str] = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=UpperCAmelCase , legacy=UpperCAmelCase )
lowerCamelCase__ : Dict = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
lowerCamelCase__ : Tuple = tokenizer(UpperCAmelCase , return_tensors='pt' , padding=UpperCAmelCase ).input_ids
# fmt: off
lowerCamelCase__ : Any = torch.tensor(
[
            [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Optional[int] = model.generate(input_ids.to(UpperCAmelCase ) )
lowerCamelCase__ : List[Any] = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
lowerCamelCase__ : Union[str, Any] = tokenizer.batch_decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
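# A minimal generation sketch, assuming this file tests transformers' UMT5 models
# (`UMT5ForConditionalGeneration` upstream); the prompt is taken from the inputs above:
#
#   from transformers import AutoTokenizer, UMT5ForConditionalGeneration
#
#   tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
#   model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
#   ids = tokenizer("No se como puedo <extra_id_0>.", return_tensors="pt").input_ids
#   print(tokenizer.batch_decode(model.generate(ids)))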
| 50
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
A : Optional[Any] = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(a )
class __A( a ):
snake_case_ = '''rag'''
snake_case_ = True
def __init__( self , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=" / " , _snake_case=" // " , _snake_case=5 , _snake_case=300 , _snake_case=768 , _snake_case=8 , _snake_case="wiki_dpr" , _snake_case="train" , _snake_case="compressed" , _snake_case=None , _snake_case=None , _snake_case=False , _snake_case=False , _snake_case=0.0 , _snake_case=True , _snake_case=False , _snake_case=False , _snake_case=False , _snake_case=True , _snake_case=None , **_snake_case , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
bos_token_id=_snake_case , pad_token_id=_snake_case , eos_token_id=_snake_case , decoder_start_token_id=_snake_case , forced_eos_token_id=_snake_case , is_encoder_decoder=_snake_case , prefix=_snake_case , vocab_size=_snake_case , **_snake_case , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__a = kwargs.pop('''question_encoder''' )
__a = question_encoder_config.pop('''model_type''' )
__a = kwargs.pop('''generator''' )
__a = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
__a = AutoConfig.for_model(_snake_case , **_snake_case )
__a = AutoConfig.for_model(_snake_case , **_snake_case )
__a = reduce_loss
__a = label_smoothing
__a = exclude_bos_score
__a = do_marginalize
__a = title_sep
__a = doc_sep
__a = n_docs
__a = max_combined_length
__a = dataset
__a = dataset_split
__a = index_name
__a = retrieval_vector_size
__a = retrieval_batch_size
__a = passages_path
__a = index_path
__a = use_dummy_dataset
__a = output_retrieved
__a = do_deduplication
__a = use_cache
if self.forced_eos_token_id is None:
__a = getattr(self.generator , '''forced_eos_token_id''' , _snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , _snake_case , _snake_case , **_snake_case ) -> PretrainedConfig:
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = copy.deepcopy(self.__dict__ )
__a = self.question_encoder.to_dict()
__a = self.generator.to_dict()
__a = self.__class__.model_type
return output
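# A minimal sketch (an illustrative addition, not part of the original module) of
# composing a RagConfig from two sub-configs; `from_question_encoder_generator_configs`
# is the public name of the classmethod defined above, and the DPR/BART choices are
# assumptions for the example only.
if __name__ == "__main__":
    from transformers import BartConfig, DPRConfig, RagConfig

    rag_config = RagConfig.from_question_encoder_generator_configs(
        DPRConfig(), BartConfig(), n_docs=5
    )
    print(rag_config.n_docs)  # -> 5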
| 6
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase=() , _UpperCAmelCase=None , _UpperCAmelCase="no" , _UpperCAmelCase="29500" ) -> Tuple:
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Dict = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
lowerCamelCase__ : Optional[Any] = True
elif "IPython" in sys.modules:
lowerCamelCase__ : Optional[Any] = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
lowerCamelCase__ : List[str] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , _UpperCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cell initializes an '
'`Accelerator`.' )
if num_processes is None:
lowerCamelCase__ : Optional[Any] = 8
lowerCamelCase__ : List[str] = PrepareForLaunch(_UpperCAmelCase , distributed_type='TPU' )
print(F"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*_UpperCAmelCase )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cell initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
with patch_environment(
                world_size=_UpperCAmelCase , master_addr='127.0.0.1' , master_port=_UpperCAmelCase , mixed_precision=_UpperCAmelCase ):
lowerCamelCase__ : Optional[int] = PrepareForLaunch(_UpperCAmelCase , distributed_type='MULTI_GPU' )
print(F"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
lowerCamelCase__ : int = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase=() , _UpperCAmelCase=2 ) -> Optional[Any]:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=_UpperCAmelCase , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
lowerCamelCase__ : Optional[Any] = PrepareForLaunch(_UpperCAmelCase , debug=_UpperCAmelCase )
start_processes(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method='fork' )
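# A minimal usage sketch, assuming this module corresponds to accelerate's
# `notebook_launcher`; the training function and process count are illustrative:
#
#   from accelerate import notebook_launcher
#
#   def training_function():
#       ...  # create the Accelerator *inside* this function, per the checks above
#
#   notebook_launcher(training_function, args=(), num_processes=2)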
| 50
| 0
|
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
class A ( _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase = CLIPConfig
lowerCamelCase = ['CLIPEncoderLayer']
def __init__( self : int,lowercase_ : CLIPConfig )-> Union[str, Any]:
'''simple docstring'''
super().__init__(lowercase_ )
A__ = CLIPVisionModelWithProjection(config.vision_config )
A__ = nn.Linear(config.vision_config.projection_dim,1 )
A__ = nn.Linear(config.vision_config.projection_dim,1 )
@torch.no_grad()
def snake_case__ ( self : Optional[int],lowercase_ : List[Any],lowercase_ : Dict,lowercase_ : str=0.5,lowercase_ : List[Any]=0.5 )-> List[str]:
'''simple docstring'''
A__ = self.vision_model(lowercase_ )[0]
A__ = self.p_head(lowercase_ )
A__ = nsfw_detected.flatten()
A__ = nsfw_detected > p_threshold
A__ = nsfw_detected.tolist()
if any(lowercase_ ):
logger.warning(
'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.' )
for idx, nsfw_detected_ in enumerate(lowercase_ ):
if nsfw_detected_:
A__ = np.zeros(images[idx].shape )
A__ = self.w_head(lowercase_ )
A__ = watermark_detected.flatten()
A__ = watermark_detected > w_threshold
A__ = watermark_detected.tolist()
if any(lowercase_ ):
logger.warning(
'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.' )
for idx, watermark_detected_ in enumerate(lowercase_ ):
if watermark_detected_:
A__ = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 7
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = 42
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase ):
@register_to_config
def __init__( self : List[str] , UpperCAmelCase : int = 65536 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 0 , UpperCAmelCase : str = "fourier" , UpperCAmelCase : bool = True , UpperCAmelCase : bool = False , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase : Tuple[str] = "UNetMidBlock1D" , UpperCAmelCase : str = None , UpperCAmelCase : Tuple[int] = (32, 32, 64) , UpperCAmelCase : str = None , UpperCAmelCase : int = 8 , UpperCAmelCase : int = 1 , UpperCAmelCase : bool = False , ) -> List[Any]:
super().__init__()
lowerCamelCase__ : Optional[int] = sample_size
# time
if time_embedding_type == "fourier":
lowerCamelCase__ : Optional[Any] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=UpperCAmelCase , log=UpperCAmelCase , flip_sin_to_cos=UpperCAmelCase )
lowerCamelCase__ : Any = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowerCamelCase__ : List[Any] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase , downscale_freq_shift=UpperCAmelCase )
lowerCamelCase__ : Dict = block_out_channels[0]
if use_timestep_embedding:
lowerCamelCase__ : str = block_out_channels[0] * 4
lowerCamelCase__ : List[Any] = TimestepEmbedding(
in_channels=UpperCAmelCase , time_embed_dim=UpperCAmelCase , act_fn=UpperCAmelCase , out_dim=block_out_channels[0] , )
lowerCamelCase__ : Any = nn.ModuleList([] )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : List[str] = nn.ModuleList([] )
lowerCamelCase__ : Optional[int] = None
# down
lowerCamelCase__ : Optional[int] = in_channels
for i, down_block_type in enumerate(UpperCAmelCase ):
lowerCamelCase__ : Union[str, Any] = output_channel
lowerCamelCase__ : Tuple = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowerCamelCase__ : Union[str, Any] = i == len(UpperCAmelCase ) - 1
lowerCamelCase__ : Optional[int] = get_down_block(
UpperCAmelCase , num_layers=UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(UpperCAmelCase )
# mid
lowerCamelCase__ : Optional[int] = get_mid_block(
UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase , add_downsample=UpperCAmelCase , )
# up
lowerCamelCase__ : Optional[int] = list(reversed(UpperCAmelCase ) )
lowerCamelCase__ : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
lowerCamelCase__ : List[str] = out_channels
else:
lowerCamelCase__ : Any = block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase ):
lowerCamelCase__ : Tuple = output_channel
lowerCamelCase__ : Union[str, Any] = (
reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase ) - 1 else final_upsample_channels
)
lowerCamelCase__ : List[str] = i == len(UpperCAmelCase ) - 1
lowerCamelCase__ : Dict = get_up_block(
UpperCAmelCase , num_layers=UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(UpperCAmelCase )
lowerCamelCase__ : int = output_channel
# out
lowerCamelCase__ : int = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowerCamelCase__ : List[Any] = get_out_block(
out_block_type=UpperCAmelCase , num_groups_out=UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase , act_fn=UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def A_ ( self : List[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Union[torch.Tensor, float, int] , UpperCAmelCase : bool = True , ) -> Union[UNetaDOutput, Tuple]:
lowerCamelCase__ : Optional[Any] = timestep
if not torch.is_tensor(UpperCAmelCase ):
lowerCamelCase__ : Optional[int] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(UpperCAmelCase ) and len(timesteps.shape ) == 0:
lowerCamelCase__ : List[str] = timesteps[None].to(sample.device )
lowerCamelCase__ : Optional[int] = self.time_proj(UpperCAmelCase )
if self.config.use_timestep_embedding:
lowerCamelCase__ : str = self.time_mlp(UpperCAmelCase )
else:
lowerCamelCase__ : List[str] = timestep_embed[..., None]
lowerCamelCase__ : str = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowerCamelCase__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowerCamelCase__ : str = ()
for downsample_block in self.down_blocks:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = downsample_block(hidden_states=UpperCAmelCase , temb=UpperCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowerCamelCase__ : Optional[Any] = self.mid_block(UpperCAmelCase , UpperCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowerCamelCase__ : Dict = down_block_res_samples[-1:]
lowerCamelCase__ : Optional[Any] = down_block_res_samples[:-1]
lowerCamelCase__ : Any = upsample_block(UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , temb=UpperCAmelCase )
# 5. post-process
if self.out_block:
lowerCamelCase__ : Any = self.out_block(UpperCAmelCase , UpperCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=UpperCAmelCase )
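# A minimal forward-pass sketch, assuming this module corresponds to diffusers'
# `UNet1DModel`; the sample size and channel counts below are illustrative
# assumptions, not taken from this file.
if __name__ == "__main__":
    from diffusers import UNet1DModel

    unet = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
    noisy_sample = torch.randn(1, 2, 65536)       # (batch, channels, length)
    out = unet(noisy_sample, timestep=10).sample  # UNet1DOutput.sample
    print(out.shape)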
| 50
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = "xlm-roberta-xl"
def __init__( self : str , _UpperCamelCase : Union[str, Any]=2_5_0_8_8_0 , _UpperCamelCase : List[Any]=2_5_6_0 , _UpperCamelCase : Any=3_6 , _UpperCamelCase : Dict=3_2 , _UpperCamelCase : Optional[int]=1_0_2_4_0 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : Union[str, Any]=5_1_4 , _UpperCamelCase : Dict=1 , _UpperCamelCase : int=0.02 , _UpperCamelCase : List[str]=1e-05 , _UpperCamelCase : Dict=1 , _UpperCamelCase : List[str]=0 , _UpperCamelCase : str=2 , _UpperCamelCase : Dict="absolute" , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : Dict=None , **_UpperCamelCase : List[Any] , ) ->Union[str, Any]:
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = position_embedding_type
snake_case_ = use_cache
snake_case_ = classifier_dropout
class snake_case_ ( __A ):
'''simple docstring'''
@property
def snake_case__( self : List[str] ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
snake_case_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
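# A short usage sketch, assuming these classes correspond to transformers'
# `XLMRobertaXLConfig` / `XLMRobertaXLOnnxConfig`:
#
#   onnx_config = XLMRobertaXLOnnxConfig(XLMRobertaXLConfig(), task="default")
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'sequence'})])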
| 8
|
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> list[tuple[int, int]]:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = position
lowerCamelCase__ : Optional[Any] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowerCamelCase__ : Dict = []
for position in positions:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(_UpperCAmelCase )
return permissible_positions
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> bool:
return not any(elem == 0 for row in board for elem in row )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> bool:
if is_complete(_UpperCAmelCase ):
return True
for position in get_valid_pos(_UpperCAmelCase , len(_UpperCAmelCase ) ):
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = position
if board[y][x] == 0:
lowerCamelCase__ : List[Any] = curr + 1
if open_knight_tour_helper(_UpperCAmelCase , _UpperCAmelCase , curr + 1 ):
return True
lowerCamelCase__ : Optional[Any] = 0
return False
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> list[list[int]]:
lowerCamelCase__ : Any = [[0 for i in range(_UpperCAmelCase )] for j in range(_UpperCAmelCase )]
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
lowerCamelCase__ : Optional[int] = 1
if open_knight_tour_helper(_UpperCAmelCase , (i, j) , 1 ):
return board
lowerCamelCase__ : Optional[Any] = 0
    lowerCamelCase__ : Any = F"""Open Knight Tour cannot be performed on a board of size {n}"""
raise ValueError(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
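# A usage sketch under the assumption that the functions above implement the
# standard open knight's tour backtracking solver (valid-move generator,
# completeness check, recursive helper) and that the last one is the entry point
# taking the board size (`open_knight_tour` upstream):
#
#   board = open_knight_tour(5)
#   for row in board:
#       print(row)  # each cell holds the knight's move number (1..25)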
| 50
| 0
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : Any = 10
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Tuple = [1, 2, 3, 4]
__SCREAMING_SNAKE_CASE : str = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowerCAmelCase__ , self.block_size , 0 ) , lowerCAmelCase__ )
def __magic_name__( self :int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__SCREAMING_SNAKE_CASE : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowerCAmelCase__ , self.block_size , 0 ) , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__SCREAMING_SNAKE_CASE : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowerCAmelCase__ , self.block_size , 0 ) , lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = process_story(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , [] )
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = ''''''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = process_story(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , [] )
self.assertEqual(lowerCAmelCase__ , [] )
def __magic_name__( self :Dict ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[str] = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = process_story(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''It was the best of times.''']
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([1, 2, 3, 4] )
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase__ , 0 ).numpy() , expected.numpy() )
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__SCREAMING_SNAKE_CASE : int = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase__ , 23 ).numpy() , expected.numpy() )
def __magic_name__( self :str ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase__ , 1 ).numpy() , expected.numpy() )
def __magic_name__( self :str ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = 101
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
__SCREAMING_SNAKE_CASE : str = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__SCREAMING_SNAKE_CASE : Tuple = compute_token_type_ids(lowerCAmelCase__ , lowerCAmelCase__ )
np.testing.assert_array_equal(lowerCAmelCase__ , lowerCAmelCase__ )
| 9
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int:
lowerCamelCase__ : Optional[int] = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
lowerCamelCase__ : Tuple = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def SCREAMING_SNAKE_CASE ( ) -> str:
lowerCamelCase__ : str = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
lowerCamelCase__ : Tuple = 'imagenet-1k-id2label.json'
lowerCamelCase__ : Union[str, Any] = 1000
lowerCamelCase__ : Optional[Any] = 'huggingface/label-files'
lowerCamelCase__ : Any = num_labels
lowerCamelCase__ : Dict = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) ) , 'r' ) )
lowerCamelCase__ : int = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : Tuple = idalabel
lowerCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
lowerCamelCase__ : List[str] = CvtConfig(num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
lowerCamelCase__ : List[Any] = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
lowerCamelCase__ : Dict = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowerCamelCase__ : Optional[Any] = [2, 2, 20]
lowerCamelCase__ : Optional[int] = [3, 12, 16]
lowerCamelCase__ : str = [192, 768, 1024]
lowerCamelCase__ : Any = CvtForImageClassification(_UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
lowerCamelCase__ : Tuple = image_size
lowerCamelCase__ : List[str] = torch.load(_UpperCAmelCase , map_location=torch.device('cpu' ) )
lowerCamelCase__ : Optional[int] = OrderedDict()
lowerCamelCase__ : Tuple = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowerCamelCase__ : Optional[Any] = list_of_state_dict + cls_token(_UpperCAmelCase )
lowerCamelCase__ : str = list_of_state_dict + embeddings(_UpperCAmelCase )
for cnt in range(config.depth[idx] ):
lowerCamelCase__ : str = list_of_state_dict + attention(_UpperCAmelCase , _UpperCAmelCase )
lowerCamelCase__ : int = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
lowerCamelCase__ : str = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_84,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase : List[str] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
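# As an illustration only: a typical invocation (the script filename is an
# assumption; the flags and defaults match the argparse definitions above):
#
#   python convert_cvt_checkpoint.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name "cvtmodels/CvT-w24-384x384-IN-22k.pth" \
#       --pytorch_dump_folder_path ./cvt-w24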
| 50
| 0
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__A = logging.get_logger(__name__)
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =R"\w+[.]\d+"
lowerCamelCase__: int =re.findall(__a , __a )
for pat in pats:
lowerCamelCase__: Optional[int] =key.replace(__a , "_".join(pat.split("." ) ) )
return key
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: int =pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCamelCase__: Optional[Any] =pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCamelCase__: int =pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCamelCase__: int =pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCamelCase__: Optional[Any] =pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCamelCase__: List[Any] =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCamelCase__: Union[str, Any] =pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
lowerCamelCase__: int =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCamelCase__: str =pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCamelCase__: int =pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCAmelCase_ ( __a , __a , __a=42 ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: int ={k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCamelCase__: int =flax_model.init_weights(PRNGKey(__a ) )
lowerCamelCase__: Optional[int] =flatten_dict(__a )
lowerCamelCase__: int ={}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase__: List[str] =rename_key(__a )
lowerCamelCase__: str =tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
lowerCamelCase__ , lowerCamelCase__: List[str] =rename_key_and_reshape_tensor(__a , __a , __a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
lowerCamelCase__: Union[str, Any] =jnp.asarray(__a )
return unflatten_dict(__a )
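# A small numeric illustration (an illustrative addition, not part of this module)
# of the layout conventions handled above: PyTorch Conv2d weights are
# (out, in, kH, kW) while Flax expects (kH, kW, in, out), hence the
# transpose(2, 3, 1, 0); PyTorch Linear weights (out, in) are simply transposed
# to Flax's (in, out) kernel layout.
if __name__ == "__main__":
    import numpy as np

    pt_conv = np.zeros((8, 3, 5, 5))    # (out_ch, in_ch, kH, kW)
    assert pt_conv.transpose(2, 3, 1, 0).shape == (5, 5, 3, 8)
    pt_linear = np.zeros((16, 32))      # (out_features, in_features)
    assert pt_linear.T.shape == (32, 16)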
| 10
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase=None ) -> Tuple:
if subparsers is not None:
lowerCamelCase__ : Any = subparsers.add_parser('test' )
else:
lowerCamelCase__ : int = argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' , default=_UpperCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=_UpperCAmelCase )
return parser
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Tuple = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
lowerCamelCase__ : List[str] = script_name
else:
lowerCamelCase__ : List[Any] = F"""--config_file={args.config_file} {script_name}"""
lowerCamelCase__ : str = ['accelerate-launch'] + test_args.split()
lowerCamelCase__ : Dict = execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def SCREAMING_SNAKE_CASE ( ) -> Any:
lowerCamelCase__ : Any = test_command_parser()
lowerCamelCase__ : List[Any] = parser.parse_args()
test_command(_UpperCAmelCase )
if __name__ == "__main__":
main()
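# Typical CLI usage of the command defined above (the config path is illustrative):
#
#   accelerate test
#   accelerate test --config_file /path/to/default_config.yaml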
| 50
| 0
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)"""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Reconstruct the original command line, wrapped to `max_width` characters per line."""
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"\nDatetime    : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch       : {torch.__version__}\ncuda        : {torch.version.cuda}\npython      : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
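# Quick illustration (separate from the tool above) of how the --variations dimensions
# expand into the cartesian product of per-run argument strings, mirroring main():
#
#   import itertools, re
#   variations = ["--tf32 0|--tf32 1", "|--fp16|--bf16"]
#   dims = [list(map(str.strip, re.split(r"\|", x))) for x in variations]
#   runs = list(map(str.strip, map(" ".join, itertools.product(*dims))))
#   # runs == ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #          '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']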
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F"""{solution() = }""")
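# Sanity-check sketch (not part of the solution above): with x > y > z in arithmetic
# progression, write y = a, x = a + d, z = a - d, so that
# x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4 * d - a),
# which is the identity the sieve exploits. A tiny brute force (ad hoc bounds) can
# cross-check small inputs against solution():
#
#   def brute_force_count(n: int, search_limit: int = 1000) -> int:
#       count = 0
#       for a in range(1, search_limit):
#           for d in range(1, a):  # z = a - d must stay positive
#               if a * (4 * d - a) == n:
#                   count += 1
#       return count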
__all__ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 5_12,
"""google/realm-cc-news-pretrained-encoder""": 5_12,
"""google/realm-cc-news-pretrained-scorer""": 5_12,
"""google/realm-cc-news-pretrained-openqa""": 5_12,
"""google/realm-orqa-nq-openqa""": 5_12,
"""google/realm-orqa-nq-reader""": 5_12,
"""google/realm-orqa-wq-openqa""": 5_12,
"""google/realm-orqa-wq-reader""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate texts/text pairs, padding everything to max_length so the
        result can be stacked into a [batch_size, num_candidates, seq_length] tensor."""
        # always pad to max_length
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
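# Hedged usage sketch (not part of the class above): batch_encode_candidates pads every
# candidate to max_length so variable-length candidates stack into a
# [batch_size, num_candidates, seq_len] tensor. Checkpoint name and shapes are illustrative.
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#   batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
#   # batch["input_ids"].shape == (2, 2, 10)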
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
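# Hedged follow-up sketch (not part of the original script): a quick way to verify the
# rename took effect, reusing the key constants defined above. The folder name assumes
# the "small" checkpoint was converted.
#
#   state_dict = torch.load(os.path.join("./DialoGPT-small", WEIGHTS_NAME))
#   assert NEW_KEY in state_dict and OLD_KEY not in state_dict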
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data, to be fed into the model."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, self.label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            # NOTE: the integer dtypes below were garbled to `tf.intaa` in the source dump;
            # int32 ids with an int64 label is the reconstruction assumed here.
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Converts a list of `InputExample`s into a list of `InputFeatures`."""
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")
    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
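# Hedged usage sketch (not part of the module above): building the PyTorch dataset from
# a local HANS checkout. The data path and tokenizer checkpoint are placeholders.
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_set = HansDataset("path/to/hans", tokenizer, task="hans", max_seq_length=128)
#   print(len(train_set), train_set[0].input_ids[:5])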
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates the train/validation `DataLoader`s for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
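# Minimal sketch (separate from the example above) of what @find_executable_batch_size
# does: it re-invokes the wrapped function, halving batch_size each time an
# out-of-memory error escapes it. Toy stand-in with a fake OOM threshold:
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       if batch_size > 32:  # pretend anything above 32 overflows GPU memory
#           raise RuntimeError("CUDA out of memory.")
#       return batch_size
#
#   train()  # retries at 128 and 64, then succeeds and returns 32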
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase__ : Any = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase__,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
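# Hedged usage sketch (not part of the test class above): the checkpoint exercised by
# the slow tests is a plain sentencepiece tokenizer, so encode/decode round-trips work
# as usual. The expected ids come from test_tokenization_base_easy_symbols above.
#
#   tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   ids = tok.encode("Hello World!")  # [18536, 2260, 101]
#   text = tok.decode(ids)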
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))
    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Any:
import faiss
lowerCamelCase__ : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase__ : Optional[int] = 'index.faiss'
lowerCamelCase__ : Optional[Any] = F"""mock://{index_name}"""
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCamelCase__ : Tuple = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCamelCase__ : Optional[int] = np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Dict = 1
lowerCamelCase__ , lowerCamelCase__ : str = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
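# A minimal raw-faiss sketch of what the FaissIndex wrapper above does,
# assuming only that faiss and numpy are installed: build an inner-product
# index, add float32 vectors row by row, and run a top-1 query. The `_demo_*`
# names are illustrative only.
import numpy as np
import faiss
_demo_index = faiss.IndexFlatIP(5)                   # inner-product metric, dimension 5
_demo_index.add(np.eye(5, dtype=np.float32))         # one unit vector per row
_demo_query = np.zeros((1, 5), dtype=np.float32)
_demo_query[0, 1] = 1.0
_demo_scores, _demo_ids = _demo_index.search(_demo_query, 1)  # (scores, ids)
assert _demo_ids[0][0] == 1 and _demo_scores[0][0] > 0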
@require_elasticsearch
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : Dict ) -> List[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase__ : Any = Elasticsearch()
lowerCamelCase__ : Tuple = {'acknowledged': True}
lowerCamelCase__ : Tuple = ElasticSearchIndex(es_client=UpperCAmelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCamelCase__ : Optional[int] = 'foo'
lowerCamelCase__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = index.search(UpperCAmelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase__ : Any = 'foo'
lowerCamelCase__ : List[str] = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase__ , lowerCamelCase__ : Tuple = index.search(UpperCAmelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase__ : List[str] = ['foo', 'bar', 'foobar']
lowerCamelCase__ : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase__ , lowerCamelCase__ : str = index.search_batch(UpperCAmelCase )
lowerCamelCase__ : List[str] = [scores[0] for scores in total_scores]
lowerCamelCase__ : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase )
# batched queries with timeout
lowerCamelCase__ : str = ['foo', 'bar', 'foobar']
lowerCamelCase__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = index.search_batch(UpperCAmelCase , request_timeout=30 )
lowerCamelCase__ : Optional[Any] = [scores[0] for scores in total_scores]
lowerCamelCase__ : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase )
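# A minimal sketch of the unittest.mock pattern used by the Elasticsearch
# tests above; `top_hit_id` is a hypothetical helper, and the elasticsearch
# client library is assumed to be installed.
from unittest.mock import patch
def top_hit_id(es_client, index_name, query):
    # issue a match query and return the id of the best hit
    response = es_client.search(index=index_name, body={"query": {"match": {"text": query}}})
    return response["hits"]["hits"][0]["_id"]
with patch("elasticsearch.Elasticsearch.search") as mocked_search:
    from elasticsearch import Elasticsearch
    mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
    assert top_hit_id(Elasticsearch(), "docs", "foo") == 0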
| 50
| 0
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : str ,_snake_case : List[Any] ,_snake_case : Optional[int]=3 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : int=10 ,_snake_case : List[str]=[10, 20, 30, 40] ,_snake_case : Any=[1, 1, 2, 1] ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Union[str, Any]="relu" ,_snake_case : Dict=3 ,_snake_case : Any=None ,) -> str:
"""simple docstring"""
lowercase__ : int = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Optional[Any] = embeddings_size
lowercase__ : Optional[Any] = hidden_sizes
lowercase__ : str = depths
lowercase__ : Tuple = is_training
lowercase__ : List[Any] = use_labels
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Optional[Any] = len(_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels )
lowercase__ : int = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = TFResNetModel(config=_snake_case )
lowercase__ : List[str] = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = self.num_labels
lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case )
lowercase__ : List[str] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase : Any = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[str] = False
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = TFResNetModelTester(self )
lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ):
lowercase__ : str = model_class(_snake_case )
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Tuple = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ : List[Any] = layer_type
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ : Any = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' )
# forward pass
lowercase__ : Dict = model(**_snake_case )
# verify the logits
lowercase__ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
| 16
|
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> bool:
lowerCamelCase__ : List[str] = len(_UpperCAmelCase )
lowerCamelCase__ : str = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# a sum of zero can always be formed by taking no elements, so the first
# column of the table is True
for i in range(arr_len + 1 ):
lowerCamelCase__ : Tuple = True
# a non-zero sum cannot be formed from an empty set, so the first row is False
for i in range(1 , required_sum + 1 ):
lowerCamelCase__ : Dict = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
lowerCamelCase__ : str = subset[i - 1][j]
if arr[i - 1] <= j:
lowerCamelCase__ : Dict = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
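# A space-optimised sketch of the same subset-sum DP: the 2-D table above can
# be rolled into one boolean row when sums are updated from high to low.
# `has_subset_sum` is an illustrative name, not part of the snippet above.
def has_subset_sum(values, target):
    reachable = [False] * (target + 1)
    reachable[0] = True  # the empty subset always sums to zero
    for value in values:
        # walk downwards so each value is counted at most once
        for s in range(target, value - 1, -1):
            reachable[s] = reachable[s] or reachable[s - value]
    return reachable[target]
assert has_subset_sum([3, 34, 4, 12, 5, 2], 9) is True
assert has_subset_sum([3, 34, 4, 12, 5, 2], 30) is False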
| 50
| 0
|
"""simple docstring"""
import sys
from collections import defaultdict
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] ):
__lowercase = []
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Any] ):
return self.node_position[vertex]
def _lowercase ( self : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : str ):
__lowercase = pos
def _lowercase ( self : List[str], UpperCAmelCase__ : int, UpperCAmelCase__ : Dict, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Any ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowercase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowercase = 2 * start + 1
else:
__lowercase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowercase ,__lowercase = heap[smallest_child], positions[smallest_child]
__lowercase ,__lowercase = (
heap[start],
positions[start],
)
__lowercase ,__lowercase = temp, tempa
__lowercase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child], self.get_position(positions[start] ) )
self.set_position(positions[start], UpperCAmelCase__ )
self.top_to_bottom(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
def _lowercase ( self : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Tuple ):
__lowercase = position[index]
while index != 0:
__lowercase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowercase = heap[parent]
__lowercase = position[parent]
self.set_position(position[parent], UpperCAmelCase__ )
else:
__lowercase = val
__lowercase = temp
self.set_position(UpperCAmelCase__, UpperCAmelCase__ )
break
__lowercase = parent
else:
__lowercase = val
__lowercase = temp
self.set_position(UpperCAmelCase__, 0 )
def _lowercase ( self : Optional[int], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Any ):
__lowercase = len(UpperCAmelCase__ ) // 2 - 1
for i in range(UpperCAmelCase__, -1, -1 ):
self.top_to_bottom(UpperCAmelCase__, UpperCAmelCase__, len(UpperCAmelCase__ ), UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[Any] ):
__lowercase = positions[0]
__lowercase = sys.maxsize
self.top_to_bottom(UpperCAmelCase__, 0, len(UpperCAmelCase__ ), UpperCAmelCase__ )
return temp
def _A ( UpperCamelCase_ : Dict) -> Optional[Any]:
'''simple docstring'''
__lowercase = Heap()
__lowercase = [0] * len(UpperCamelCase_)
__lowercase = [-1] * len(UpperCamelCase_) # nearest tree vertex for each selected vertex
# minimum distance from each vertex outside the partial tree to its nearest
# vertex inside the tree
__lowercase = [] # Heap of Distance of vertices from their neighboring vertex
__lowercase = []
for vertex in range(len(UpperCamelCase_)):
distance_tv.append(sys.maxsize)
positions.append(UpperCamelCase_)
heap.node_position.append(UpperCamelCase_)
__lowercase = []
__lowercase = 1
__lowercase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowercase = 0
__lowercase = distance
heap.heapify(UpperCamelCase_, UpperCamelCase_)
for _ in range(1, len(UpperCamelCase_)):
__lowercase = heap.delete_minimum(UpperCamelCase_, UpperCamelCase_)
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex))
__lowercase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase_)]
):
__lowercase = distance
heap.bottom_to_top(
UpperCamelCase_, heap.get_position(UpperCamelCase_), UpperCamelCase_, UpperCamelCase_)
__lowercase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_a = int(input('Enter number of edges: ').strip())
_a = defaultdict(list)
for _ in range(edges_number):
_a = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
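# A compact heapq-based sketch of the same algorithm, assuming the
# adjacency_list shape used above (adjacency_list[u] is a list of
# [neighbour, weight] pairs and vertex 0 exists); lazy deletion of stale
# heap entries replaces the hand-rolled bottom_to_top decrease-key.
import heapq
def prim_mst_sketch(adjacency_list):
    n = len(adjacency_list)
    visited = [False] * n
    tree_edges = []
    heap = [(0, 0, -1)]  # (weight, vertex, parent)
    while heap and len(tree_edges) < n - 1:
        _, u, parent = heapq.heappop(heap)
        if visited[u]:
            continue  # stale entry, vertex already connected
        visited[u] = True
        if parent != -1:
            tree_edges.append((parent, u))
        for v, w in adjacency_list[u]:
            if not visited[v]:
                heapq.heappush(heap, (w, v, u))
    return tree_edges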
| 17
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = """M-CLIP"""
def __init__( self : Optional[Any] , UpperCAmelCase : Union[str, Any]=1024 , UpperCAmelCase : Tuple=768 , **UpperCAmelCase : Optional[int] ) -> Dict:
lowerCamelCase__ : Optional[int] = transformerDimSize
lowerCamelCase__ : Optional[Any] = imageDimSize
super().__init__(**UpperCAmelCase )
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = MCLIPConfig
def __init__( self : List[Any] , UpperCAmelCase : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ) -> Dict:
super().__init__(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase__ : Tuple = XLMRobertaModel(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def A_ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Tuple:
lowerCamelCase__ : Any = self.transformer(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
lowerCamelCase__ : int = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(UpperCAmelCase ), embs
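# The forward pass above is masked mean pooling; a standalone sketch, assuming
# token_embeddings of shape (batch, seq, dim) and a 0/1 attention_mask.
import torch
def masked_mean_pool(token_embeddings, attention_mask):
    mask = attention_mask.unsqueeze(-1).float()    # (batch, seq, 1)
    summed = (token_embeddings * mask).sum(dim=1)  # (batch, dim)
    counts = mask.sum(dim=1).clamp(min=1e-9)       # avoid division by zero
    return summed / counts
_embs = torch.randn(2, 7, 768)
_mask = torch.tensor([[1] * 7, [1] * 4 + [0] * 3])
assert masked_mean_pool(_embs, _mask).shape == (2, 768)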
| 50
| 0
|
import os
def _snake_case ( ):
"""simple docstring"""
with open(os.path.dirname(lowerCAmelCase ) + "/p022_names.txt" ) as file:
SCREAMING_SNAKE_CASE_ : List[str] = str(file.readlines()[0] )
SCREAMING_SNAKE_CASE_ : Any = names.replace("\"" , "" ).split("," )
names.sort()
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
for i, name in enumerate(lowerCAmelCase ):
for letter in name:
name_score += ord(lowerCAmelCase ) - 6_4
total_score += (i + 1) * name_score
SCREAMING_SNAKE_CASE_ : Tuple = 0
return total_score
if __name__ == "__main__":
print(solution())
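# A readable restatement of the computation above (Project Euler 22):
# alphabetical rank times the sum of letter values A=1 ... Z=26.
# `name_scores_total` is an illustrative name, not part of the snippet.
def name_scores_total(names):
    total = 0
    for rank, name in enumerate(sorted(names), start=1):
        total += rank * sum(ord(letter) - 64 for letter in name.upper())
    return total
assert name_scores_total(["COLIN", "ANN"]) == 1 * 29 + 2 * 53  # ANN=29, COLIN=53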
| 18
|
from itertools import count
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase = 50 ) -> int:
lowerCamelCase__ : Optional[Any] = [1] * min_block_length
for n in count(_UpperCAmelCase ):
fill_count_functions.append(1 )
for block_length in range(_UpperCAmelCase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 50
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = StableDiffusionPanoramaPipeline
lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowerCamelCase_ = DDIMScheduler()
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCamelCase_ = CLIPTextModel(lowercase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=0 ) -> List[str]:
lowerCamelCase_ = torch.manual_seed(lowercase )
lowerCamelCase_ = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = sd_pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> int:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = "french fries"
lowerCamelCase_ = sd_pipe(**lowercase , negative_prompt=lowercase )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = sd_pipe(**lowercase , view_batch_size=2 )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" )
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = sd_pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = PNDMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , skip_prk_steps=lowercase )
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = sd_pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_( self , lowercase=0 ) -> Any:
lowerCamelCase_ = torch.manual_seed(lowercase )
lowerCamelCase_ = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = "stabilityai/stable-diffusion-2-base"
lowerCamelCase_ = DDIMScheduler.from_pretrained(lowercase , subfolder="scheduler" )
lowerCamelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowerCamelCase_ = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=lowercase )
lowerCamelCase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowerCamelCase_ = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = 0
def callback_fn(lowercase , lowercase , lowercase ) -> None:
lowerCamelCase_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowerCamelCase_ = latents[0, -3:, -3:, -1]
lowerCamelCase_ = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCamelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowerCamelCase_ = latents[0, -3:, -3:, -1]
lowerCamelCase_ = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCamelCase_ = False
lowerCamelCase_ = "stabilityai/stable-diffusion-2-base"
lowerCamelCase_ = DDIMScheduler.from_pretrained(lowercase , subfolder="scheduler" )
lowerCamelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
lowerCamelCase_ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
pipe(**lowercase , callback=lowercase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ = "stabilityai/stable-diffusion-2-base"
lowerCamelCase_ = DDIMScheduler.from_pretrained(lowercase , subfolder="scheduler" )
lowerCamelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
lowerCamelCase_ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**lowercase )
lowerCamelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
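# Sketch of the step callback exercised above: diffusers pipelines call it
# every `callback_steps` denoising steps with (step_index, timestep, latents).
def log_latents(step, timestep, latents):
    print(f"step {step} (t={timestep}): latents {tuple(latents.shape)}")
# usage, with `pipe` and `inputs` prepared as in the tests above:
# pipe(**inputs, callback=log_latents, callback_steps=1)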
| 19
|
from __future__ import annotations
from typing import Any
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> None:
create_state_space_tree(_UpperCAmelCase , [] , 0 )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None:
if index == len(_UpperCAmelCase ):
print(_UpperCAmelCase )
return
create_state_space_tree(_UpperCAmelCase , _UpperCAmelCase , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(_UpperCAmelCase , _UpperCAmelCase , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
_UpperCAmelCase : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
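# An iterative alternative to the backtracking above: every subsequence is a
# choice of indices, so combinations of every length enumerate the power set.
from itertools import combinations
def all_subsequences(sequence):
    return [list(combo) for r in range(len(sequence) + 1) for combo in combinations(sequence, r)]
assert all_subsequences([3, 1, 2]) == [[], [3], [1], [2], [3, 1], [3, 2], [1, 2], [3, 1, 2]]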
| 50
| 0
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 20
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_UpperCAmelCase : int = None
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Optional[int] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : List[Any] = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
_UpperCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = ["""input_ids""", """attention_mask"""]
UpperCAmelCase__ = NllbTokenizer
UpperCAmelCase__ = []
UpperCAmelCase__ = []
def __init__( self : Tuple , UpperCAmelCase : int=None , UpperCAmelCase : Any=None , UpperCAmelCase : str="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : str="</s>" , UpperCAmelCase : Tuple="<s>" , UpperCAmelCase : Optional[Any]="<unk>" , UpperCAmelCase : List[str]="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : Tuple=None , UpperCAmelCase : int=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Any=False , **UpperCAmelCase : Optional[int] , ) -> Tuple:
# The mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ : List[Any] = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
lowerCamelCase__ : Union[str, Any] = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase , tokenizer_file=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , src_lang=UpperCAmelCase , tgt_lang=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , legacy_behaviour=UpperCAmelCase , **UpperCAmelCase , )
lowerCamelCase__ : List[Any] = vocab_file
lowerCamelCase__ : Dict = False if not self.vocab_file else True
lowerCamelCase__ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ : str = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ : int = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ : List[Any] = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A_ ( self : int ) -> str:
return self._src_lang
@src_lang.setter
def A_ ( self : List[Any] , UpperCAmelCase : str ) -> None:
lowerCamelCase__ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A_ ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A_ ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
lowerCamelCase__ : Dict = [self.sep_token_id]
lowerCamelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A_ ( self : int , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Optional[str] , UpperCAmelCase : Optional[str] , **UpperCAmelCase : List[str] ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ : Optional[int] = src_lang
lowerCamelCase__ : Optional[int] = self(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = tgt_lang_id
return inputs
def A_ ( self : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : str = "eng_Latn" , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "fra_Latn" , **UpperCAmelCase : Dict , ) -> BatchEncoding:
lowerCamelCase__ : Any = src_lang
lowerCamelCase__ : int = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Union[str, Any] ) -> Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def A_ ( self : Any ) -> Union[str, Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A_ ( self : str , UpperCAmelCase : Optional[Any] ) -> None:
lowerCamelCase__ : int = self.convert_tokens_to_ids(UpperCAmelCase )
if self.legacy_behaviour:
lowerCamelCase__ : int = []
lowerCamelCase__ : str = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ : int = [self.cur_lang_code]
lowerCamelCase__ : Tuple = [self.eos_token_id]
lowerCamelCase__ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A_ ( self : int , UpperCAmelCase : str ) -> None:
lowerCamelCase__ : Union[str, Any] = self.convert_tokens_to_ids(UpperCAmelCase )
if self.legacy_behaviour:
lowerCamelCase__ : Dict = []
lowerCamelCase__ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ : Any = [self.cur_lang_code]
lowerCamelCase__ : Optional[Any] = [self.eos_token_id]
lowerCamelCase__ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A_ ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
lowerCamelCase__ : int = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
return (out_vocab_file,)
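# A hedged usage sketch for the fast NLLB tokenizer above; it assumes network
# access to download the checkpoint and that the language codes are those in
# FAIRSEQ_LANGUAGE_CODES.
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
inputs = tokenizer("Hello world", return_tensors="pt")
# In the default (non-legacy) behaviour the source language code is prefixed
# and </s> is suffixed, as set_src_lang_special_tokens above arranges.
print(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]))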
| 50
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
_lowercase : Dict = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class _lowerCamelCase( _a, _a, _a, unittest.TestCase ):
lowercase_ : str = StableDiffusionLatentUpscalePipeline
lowercase_ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""height""",
"""width""",
"""cross_attention_kwargs""",
"""negative_prompt_embeds""",
"""prompt_embeds""",
}
lowercase_ : Dict = PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
lowercase_ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase_ : Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase_ : Dict = frozenset([] )
lowercase_ : Dict = True
@property
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : int = 1
_lowercase : List[str] = 4
_lowercase : Optional[Any] = (16, 16)
_lowercase : List[Any] = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(lowerCamelCase)
return image
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Optional[Any] = UNetaDConditionModel(
act_fn='gelu', attention_head_dim=8, norm_num_groups=lowerCamelCase, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=1_60, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
), in_channels=8, mid_block_type=lowerCamelCase, only_cross_attention=lowerCamelCase, out_channels=5, resnet_time_scale_shift='scale_shift', time_embedding_type='fourier', timestep_post_act='gelu', up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D'), )
_lowercase : Dict = AutoencoderKL(
block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
_lowercase : Optional[Any] = EulerDiscreteScheduler(prediction_type='sample')
_lowercase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, hidden_act='quick_gelu', projection_dim=5_12, )
_lowercase : Optional[Any] = CLIPTextModel(lowerCamelCase)
_lowercase : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
_lowercase : Optional[int] = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Union[str, Any]:
"""simple docstring"""
if str(lowerCamelCase).startswith('mps'):
_lowercase : List[Any] = torch.manual_seed(lowerCamelCase)
else:
_lowercase : Optional[Any] = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = 'cpu'
_lowercase : int = self.get_dummy_components()
_lowercase : Optional[Any] = self.pipeline_class(**lowerCamelCase)
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : int = self.get_dummy_inputs(lowerCamelCase)
_lowercase : List[Any] = pipe(**lowerCamelCase).images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape, (1, 2_56, 2_56, 3))
_lowercase : Union[str, Any] = np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5])
_lowercase : List[Any] = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(lowerCamelCase, 1E-3)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[Any] = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : Dict = self.pipeline_class(**lowerCamelCase)
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=lowerCamelCase)
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = self.get_dummy_inputs(lowerCamelCase)
_lowercase : List[Any] = 2
_lowercase : List[str] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# these schedulers are not supported by this pipeline, skip them
continue
_lowercase : int = getattr(lowerCamelCase, scheduler_enum.name)
_lowercase : Dict = scheduler_cls.from_config(pipe.scheduler.config)
_lowercase : str = pipe(**lowerCamelCase)[0]
outputs.append(lowerCamelCase)
assert check_same_shape(lowerCamelCase)
@require_torch_gpu
@slow
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Tuple = torch.manual_seed(33)
_lowercase : Optional[Any] = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', torch_dtype=torch.floataa)
pipe.to('cuda')
_lowercase : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.floataa)
upscaler.to('cuda')
_lowercase : List[str] = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
_lowercase : Any = pipe(lowerCamelCase, generator=lowerCamelCase, output_type='latent').images
_lowercase : List[Any] = upscaler(
prompt=lowerCamelCase, image=lowerCamelCase, num_inference_steps=20, guidance_scale=0, generator=lowerCamelCase, output_type='np', ).images[0]
_lowercase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy')
assert np.abs((expected_image - image).mean()) < 5E-2
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[int] = torch.manual_seed(33)
_lowercase : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.floataa)
upscaler.to('cuda')
_lowercase : Any = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
_lowercase : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png')
_lowercase : int = upscaler(
prompt=lowerCamelCase, image=lowerCamelCase, num_inference_steps=20, guidance_scale=0, generator=lowerCamelCase, output_type='np', ).images[0]
_lowercase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy')
assert np.abs((expected_image - image).max()) < 5E-2
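# The slow tests above exercise a two-stage flow; a sketch of the same flow,
# assuming a CUDA device and access to both checkpoints.
import torch
from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline
base = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
).to("cuda")
prompt = "a photo of an astronaut, high resolution"
latents = base(prompt, output_type="latent").images  # stay in latent space
image = upscaler(
    prompt=prompt, image=latents, num_inference_steps=20, guidance_scale=0
).images[0]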
| 21
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Optional[int] = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : List[str] = emb.weight.shape
lowerCamelCase__ : Tuple = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
lowerCamelCase__ : Dict = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Tuple = torch.load(_UpperCAmelCase , map_location='cpu' )
lowerCamelCase__ : List[str] = mam_aaa['args'] or mam_aaa['cfg']['model']
lowerCamelCase__ : Optional[int] = mam_aaa['model']
remove_ignore_keys_(_UpperCAmelCase )
lowerCamelCase__ : str = state_dict['encoder.embed_tokens.weight'].shape[0]
lowerCamelCase__ : Union[str, Any] = MaMaaaConfig(
vocab_size=_UpperCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
lowerCamelCase__ : Optional[Any] = state_dict['decoder.embed_tokens.weight']
lowerCamelCase__ : Union[str, Any] = MaMaaaForConditionalGeneration(_UpperCAmelCase )
model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
lowerCamelCase__ : List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_UpperCAmelCase : str = parser.parse_args()
_UpperCAmelCase : Optional[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
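# The make_linear_from_emb step above ties the output projection to the input
# embeddings; a standalone sketch of the same idea.
import torch
from torch import nn
def linear_from_embedding(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # share the embedding matrix
    return lin_layer
_emb = nn.Embedding(100, 16)
_logits = linear_from_embedding(_emb)(torch.randn(2, 16))
assert _logits.shape == (2, 100)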
| 50
| 0
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__SCREAMING_SNAKE_CASE :Union[str, Any] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__SCREAMING_SNAKE_CASE :Union[str, Any] = logging.getLogger()
def UpperCAmelCase_ ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("-f" )
_UpperCAmelCase = parser.parse_args()
return args.f
def UpperCAmelCase_ ( __lowercase : Tuple , __lowercase : int="eval" ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = os.path.join(__lowercase , f'{split}_results.json' )
if os.path.exists(__lowercase ):
with open(__lowercase , "r" ) as f:
return json.load(__lowercase )
raise ValueError(f'can\'t find {path}' )
__SCREAMING_SNAKE_CASE :List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : List[str] ):
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = f'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(snake_case_ , "argv" , snake_case_ ):
run_flax_glue.main()
_UpperCAmelCase = get_results(snake_case_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
@slow
def lowercase ( self : int ):
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = f'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(snake_case_ , "argv" , snake_case_ ):
run_clm_flax.main()
_UpperCAmelCase = get_results(snake_case_ )
self.assertLess(result["eval_perplexity"] , 1_0_0 )
@slow
def lowercase ( self : str ):
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = f'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
with patch.object(snake_case_ , "argv" , snake_case_ ):
run_summarization_flax.main()
_UpperCAmelCase = get_results(snake_case_ , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = f'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
with patch.object(snake_case_ , "argv" , snake_case_ ):
run_mlm_flax.main()
_UpperCAmelCase = get_results(snake_case_ )
self.assertLess(result["eval_perplexity"] , 4_2 )
@slow
def lowercase ( self : Any ):
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = f'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(snake_case_ , "argv" , snake_case_ ):
run_ta_mlm_flax.main()
_UpperCAmelCase = get_results(snake_case_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.4_2 )
@slow
def lowercase ( self : Tuple ):
        # with so little data, distributed training needs more epochs to get the score on par with 0/1 GPU
_UpperCAmelCase = 7 if get_gpu_count() > 1 else 2
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = f'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
with patch.object(snake_case_ , "argv" , snake_case_ ):
run_flax_ner.main()
_UpperCAmelCase = get_results(snake_case_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def lowercase ( self : Any ):
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = f'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
with patch.object(snake_case_ , "argv" , snake_case_ ):
run_qa.main()
_UpperCAmelCase = get_results(snake_case_ )
self.assertGreaterEqual(result["eval_f1"] , 3_0 )
self.assertGreaterEqual(result["eval_exact"] , 3_0 )
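# Every test above reuses one pattern: assemble a CLI-style argv list for an
# example script, patch `sys.argv`, and invoke the script's `main()`. A
# self-contained sketch of that pattern (the script name below is hypothetical):
def _run_with_patched_argv(main_fn, argv):
    # argv[0] conventionally holds the program name
    with patch.object(sys, "argv", ["example_script.py"] + list(argv)):
        main_fn()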
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
lowerCamelCase__ : str = set()
lowerCamelCase__ : Any = []
def parse_line(_UpperCAmelCase ):
for line in fp:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase__ : Any = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(_UpperCAmelCase ) > 0:
lowerCamelCase__ : str = '\n'.join(_UpperCAmelCase )
# Only keep the warnings specified in `targets`
if any(F""": {x}: """ in warning for x in targets ):
selected_warnings.add(_UpperCAmelCase )
buffer.clear()
continue
else:
lowerCamelCase__ : List[str] = line.strip()
buffer.append(_UpperCAmelCase )
if from_gh:
for filename in os.listdir(_UpperCAmelCase ):
lowerCamelCase__ : Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not os.path.isdir(_UpperCAmelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(_UpperCAmelCase ) as fp:
parse_line(_UpperCAmelCase )
else:
try:
with zipfile.ZipFile(_UpperCAmelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(_UpperCAmelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(_UpperCAmelCase ) as fp:
parse_line(_UpperCAmelCase )
except Exception:
logger.warning(
F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Tuple = set()
lowerCamelCase__ : Optional[int] = [os.path.join(_UpperCAmelCase , _UpperCAmelCase ) for p in os.listdir(_UpperCAmelCase ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(_UpperCAmelCase , _UpperCAmelCase ) )
return selected_warnings
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Tuple:
return values.split(',' )
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
_UpperCAmelCase : Dict = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
_UpperCAmelCase : Union[str, Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
_UpperCAmelCase : Dict = extract_warnings(args.output_dir, args.targets)
_UpperCAmelCase : Optional[Any] = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
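    # Hedged usage note (the module filename is an assumption): invoked as
    #   python extract_warnings.py --workflow_run_id <run_id> --output_dir out --token <gh_token>
    # the script downloads the run's artifacts, then writes `artifacts.json` and
    # the deduplicated, sorted `selected_warnings.json` under --output_dir.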
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = 42
lowerCamelCase__ = 42
def __init__( self : Union[str, Any] , __snake_case : UNetaDModel , __snake_case : ScoreSdeVeScheduler ) -> int:
super().__init__()
self.register_modules(unet=__snake_case , scheduler=__snake_case )
@torch.no_grad()
def __call__( self : Optional[int] , __snake_case : int = 1 , __snake_case : int = 2000 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , **__snake_case : Optional[int] , ) -> Union[ImagePipelineOutput, Tuple]:
UpperCAmelCase : str = self.unet.config.sample_size
UpperCAmelCase : Union[str, Any] = (batch_size, 3, img_size, img_size)
UpperCAmelCase : int = self.unet
UpperCAmelCase : Any = randn_tensor(__snake_case , generator=__snake_case ) * self.scheduler.init_noise_sigma
UpperCAmelCase : List[Any] = sample.to(self.device )
self.scheduler.set_timesteps(__snake_case )
self.scheduler.set_sigmas(__snake_case )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCAmelCase : Any = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
UpperCAmelCase : Union[str, Any] = self.unet(__snake_case , __snake_case ).sample
UpperCAmelCase : Optional[Any] = self.scheduler.step_correct(__snake_case , __snake_case , generator=__snake_case ).prev_sample
# prediction step
UpperCAmelCase : Optional[Any] = model(__snake_case , __snake_case ).sample
UpperCAmelCase : List[str] = self.scheduler.step_pred(__snake_case , __snake_case , __snake_case , generator=__snake_case )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = output.prev_sample, output.prev_sample_mean
UpperCAmelCase : int = sample_mean.clamp(0 , 1 )
UpperCAmelCase : Union[str, Any] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase : Optional[Any] = self.numpy_to_pil(__snake_case )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__snake_case )
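# Hedged usage sketch: upstream this class is ScoreSdeVePipeline, and a typical
# invocation looks like the following (the checkpoint id is an assumption):
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]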
import flax.linen as nn
import jax
import jax.numpy as jnp
class lowerCAmelCase ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = jnp.floataa
def A_ ( self : Any ) -> Any:
lowerCamelCase__ : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : int , UpperCAmelCase : Dict ) -> Optional[Any]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = hidden_states.shape
lowerCamelCase__ : Union[str, Any] = jax.image.resize(
UpperCAmelCase , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
lowerCamelCase__ : Optional[Any] = self.conv(UpperCAmelCase )
return hidden_states
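# Shape sanity check for the 2x nearest-neighbour upsample used above.
# flax.linen convolutions take NHWC tensors, so height and width are axes 1
# and 2 (a standalone illustration, not part of the module):
def _demo_upsample_shape():
    x = jnp.zeros((1, 8, 8, 4))
    y = jax.image.resize(x, shape=(1, 16, 16, 4), method="nearest")
    return y.shape  # (1, 16, 16, 4)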
class lowerCAmelCase ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = jnp.floataa
def A_ ( self : List[str] ) -> int:
lowerCamelCase__ : Tuple = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : str , UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
lowerCamelCase__ : Optional[Any] = self.conv(UpperCAmelCase )
return hidden_states
class lowerCAmelCase ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = None
UpperCAmelCase__ = 0.0
UpperCAmelCase__ = None
UpperCAmelCase__ = jnp.floataa
def A_ ( self : List[str] ) -> Union[str, Any]:
lowerCamelCase__ : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels
lowerCamelCase__ : Tuple = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCamelCase__ : int = nn.Conv(
UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCamelCase__ : Union[str, Any] = nn.Dense(UpperCAmelCase , dtype=self.dtype )
lowerCamelCase__ : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCamelCase__ : List[Any] = nn.Dropout(self.dropout_prob )
lowerCamelCase__ : Tuple = nn.Conv(
UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCamelCase__ : Optional[Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
lowerCamelCase__ : Union[str, Any] = None
if use_nin_shortcut:
lowerCamelCase__ : Dict = nn.Conv(
UpperCAmelCase , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=True ) -> Optional[int]:
lowerCamelCase__ : Union[str, Any] = hidden_states
lowerCamelCase__ : List[Any] = self.norma(UpperCAmelCase )
lowerCamelCase__ : List[Any] = nn.swish(UpperCAmelCase )
lowerCamelCase__ : Any = self.conva(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.time_emb_proj(nn.swish(UpperCAmelCase ) )
lowerCamelCase__ : List[str] = jnp.expand_dims(jnp.expand_dims(UpperCAmelCase , 1 ) , 1 )
lowerCamelCase__ : List[str] = hidden_states + temb
lowerCamelCase__ : Optional[Any] = self.norma(UpperCAmelCase )
lowerCamelCase__ : List[str] = nn.swish(UpperCAmelCase )
lowerCamelCase__ : Optional[int] = self.dropout(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : str = self.conva(UpperCAmelCase )
if self.conv_shortcut is not None:
lowerCamelCase__ : Dict = self.conv_shortcut(UpperCAmelCase )
return hidden_states + residual
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class SCREAMING_SNAKE_CASE__ :
def __init__(self : int , a__ : Optional[int] , a__ : List[Any]=13 , a__ : Dict=7 , a__ : str=True , a__ : str=True , a__ : Optional[int]=False , a__ : Optional[int]=True , a__ : Union[str, Any]=99 , a__ : Optional[int]=32 , a__ : Optional[int]=5 , a__ : Optional[Any]=4 , a__ : str=37 , a__ : Union[str, Any]="gelu" , a__ : int=0.1 , a__ : List[Any]=0.1 , a__ : Dict=512 , a__ : Optional[int]=16 , a__ : List[Any]=2 , a__ : str=0.0_2 , a__ : int=3 , a__ : Tuple=4 , a__ : Optional[Any]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = ids_tensor([self.batch_size] , self.num_choices )
__snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a (self : str ):
"""simple docstring"""
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , )
def a (self : str , a__ : int , a__ : str , a__ : Union[str, Any] , a__ : Optional[Any] , a__ : Optional[int] , a__ : Optional[Any] , a__ : List[str] ):
"""simple docstring"""
__snake_case = LlamaModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , attention_mask=a__ )
__snake_case = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a (self : Optional[Any] , a__ : str , a__ : List[Any] , a__ : Dict , a__ : Any , a__ : Tuple , a__ : Optional[int] , a__ : List[str] , a__ : Optional[Any] , a__ : Dict , ):
"""simple docstring"""
__snake_case = True
__snake_case = LlamaModel(a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
__snake_case = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , )
__snake_case = model(a__ , attention_mask=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a (self : Any , a__ : Union[str, Any] , a__ : Tuple , a__ : Tuple , a__ : int , a__ : int , a__ : str , a__ : int , a__ : List[Any] , a__ : List[Any] , ):
"""simple docstring"""
__snake_case = LlamaForCausalLM(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a (self : int , a__ : str , a__ : Tuple , a__ : Union[str, Any] , a__ : Optional[int] , a__ : Any , a__ : str , a__ : Optional[int] , a__ : Union[str, Any] , a__ : int , ):
"""simple docstring"""
__snake_case = True
__snake_case = True
__snake_case = LlamaForCausalLM(config=a__ )
model.to(a__ )
model.eval()
# first forward pass
__snake_case = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , use_cache=a__ , )
__snake_case = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and extend the attention mask accordingly
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case = torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , output_hidden_states=a__ , )['''hidden_states'''][0]
__snake_case = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , past_key_values=a__ , output_hidden_states=a__ , )['''hidden_states'''][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a__ , a__ , atol=1E-3 ) )
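    # The assertion above is the standard KV-cache consistency check: one forward
    # pass over the concatenation [input_ids; next_tokens] must yield the same
    # hidden states for the new tokens as an incremental pass that reuses the
    # past_key_values computed from input_ids alone. Schematically:
    #   full = model(cat(x, y)).hidden_states[:, -len(y):]
    #   step = model(y, past_key_values=cache_of(x)).hidden_states
    #   assert allclose(full, step, atol=1e-3)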
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
        (
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
        ) = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : List[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
A_ : List[str] = (LlamaForCausalLM,) if is_torch_available() else ()
A_ : Dict = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Any = False
A_ : Union[str, Any] = False
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = LlamaModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 )
def a (self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def a (self : List[str] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*a__ )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict['''input_ids''']
__snake_case = input_ids.ne(1 ).to(a__ )
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case = LlamaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = '''single_label_classification'''
__snake_case = input_dict['''input_ids''']
__snake_case = input_ids.ne(1 ).to(a__ )
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case = LlamaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = '''multi_label_classification'''
__snake_case = input_dict['''input_ids''']
__snake_case = input_ids.ne(1 ).to(a__ )
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__snake_case = LlamaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def a (self : Optional[Any] , a__ : Optional[int] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = ids_tensor([1, 10] , config.vocab_size )
__snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case = LlamaModel(a__ )
original_model.to(a__ )
original_model.eval()
        __snake_case = original_model(a__ ).last_hidden_state  # short input
        __snake_case = original_model(a__ ).last_hidden_state  # long input
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case = {'''type''': scaling_type, '''factor''': 1_0.0}
__snake_case = LlamaModel(a__ )
scaled_model.to(a__ )
scaled_model.eval()
        __snake_case = scaled_model(a__ ).last_hidden_state  # short input
        __snake_case = scaled_model(a__ ).last_hidden_state  # long input
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a__ , a__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a__ , a__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a__ , a__ , atol=1E-5 ) )
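# Context for the parameterized test above: "linear" RoPE scaling divides the
# position index by the factor, so rotary angles change even for short inputs;
# "dynamic" NTK-style scaling only enlarges the rotary base once the input
# exceeds the original max_position_embeddings, leaving short inputs intact.
# A minimal sketch of the two position schedules (an illustration, not the
# transformers implementation):
def _rope_position_schedule(seq_len, factor, scaling_type="linear"):
    if scaling_type == "linear":
        return [t / factor for t in range(seq_len)]
    # dynamic: position indices are unchanged; only the rotary base is rescaled
    return list(range(seq_len))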
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def a (self : Tuple ):
"""simple docstring"""
__snake_case = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__snake_case = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
__snake_case = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__snake_case = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__snake_case = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a__ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def a (self : str ):
"""simple docstring"""
__snake_case = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__snake_case = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
__snake_case = model(torch.tensor(a__ ) )
# Expected mean on dim = -1
__snake_case = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__snake_case = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a__ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def a (self : Any ):
"""simple docstring"""
__snake_case = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__snake_case = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
__snake_case = model(torch.tensor(a__ ) )
# Expected mean on dim = -1
__snake_case = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__snake_case = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
        '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test''' )
@slow
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__snake_case = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
__snake_case = model(torch.tensor(a__ ) )
__snake_case = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 )
# fmt: off
__snake_case = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a__ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('''Model is currently gated''' )
@slow
def a (self : List[str] ):
"""simple docstring"""
__snake_case = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
__snake_case = '''Simply put, the theory of relativity states that '''
__snake_case = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
__snake_case = tokenizer.encode(a__ , return_tensors='''pt''' )
__snake_case = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=a__ )
# greedy generation outputs
__snake_case = model.generate(a__ , max_new_tokens=64 , top_p=a__ , temperature=1 , do_sample=a__ )
__snake_case = tokenizer.decode(generated_ids[0] , skip_special_tokens=a__ )
self.assertEqual(a__ , a__ )
def matching_min_vertex_cover(graph: dict) -> set:
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)
    # While there are still edges left, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove every edge incident to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
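# Worked example for the matching-based 2-approximation above, using the graph
# from the comment (set iteration order varies, so this is one of several
# valid covers):
def _demo_matching_cover():
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    cover = matching_min_vertex_cover(graph)
    # every edge must have at least one endpoint in the cover
    assert all(u in cover or v in cover for u, v in get_edges(graph))
    return cover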
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ : Union[str, Any] = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest):
                # odd_composites[num] = rest + 2 * i * i with rest prime
                break
            i += 1
        else:
            # no prime + 2 * square decomposition exists for this odd composite
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []


def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
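    # For reference: the smallest odd composite that cannot be written as
    # prime + 2 * square, which is what solution() computes, is 5777 (the known
    # counterexample to Goldbach's "other conjecture").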
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_snake_case = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
_snake_case = 10
_snake_case = 256
def lowerCAmelCase_ ( snake_case_ ):
if len(snake_case_ ) < MIN_NUM_TOKENS:
return None
_A : Tuple = MinHash(num_perm=snake_case_ )
for token in set(snake_case_ ):
min_hash.update(token.encode() )
return min_hash
def lowerCAmelCase_ ( snake_case_ ):
return {t for t in NON_ALPHA.split(snake_case_ ) if len(t.strip() ) > 0}
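# Quick illustration of the MinHash idea used below: the fraction of matching
# slots between two signatures approximates the Jaccard similarity of the
# underlying token sets. A toy check, separate from the dedup pipeline:
def _demo_minhash_similarity():
    a, b = MinHash(num_perm=256), MinHash(num_perm=256)
    for token in "def add ( a , b ) : return a + b".split():
        a.update(token.encode())
    for token in "def add ( x , y ) : return x + y".split():
        b.update(token.encode())
    return a.jaccard(b)  # close to the true Jaccard similarity of the token sets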
class lowercase :
    def __init__( self , * , duplication_jaccard_threshold = 0.85 ) -> None:
_A : Any = duplication_jaccard_threshold
_A : int = NUM_PERM
_A : List[str] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_A : str = defaultdict(_a )
def a__ ( self , _a , _a ) -> None:
_A : Any = self._index.query(_a )
if code_key in self._index.keys:
print(F'''Duplicate key {code_key}''' )
return
self._index.insert(_a , _a )
if len(_a ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(_a )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(_a )
def a__ ( self ) -> List[List[Dict]]:
_A : List[Any] = []
for base, duplicates in self._duplicate_clusters.items():
_A : Dict = [base] + list(_a )
# reformat the cluster to be a list of dict
_A : Tuple = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(_a )
return duplicate_clusters
def a__ ( self , _a ) -> None:
_A : str = self.get_duplicate_clusters()
with open(_a , """w""" ) as f:
json.dump(_a , _a )
def lowerCAmelCase_ ( snake_case_ ):
_A , _A : Tuple = element
_A : Any = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def lowerCAmelCase_ ( snake_case_ ):
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash,ThreadedIterator(snake_case_,max_queue_size=10000 ),chunksize=100,):
if data is not None:
yield data
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = DuplicationIndex(duplication_jaccard_threshold=snake_case_ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(snake_case_ ) ),max_queue_size=100 ) ):
di.add(snake_case_,snake_case_ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Any = get_tokens(snake_case_ )
_A : Dict = get_tokens(snake_case_ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_snake_case = None
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[Any] = []
for elementa in cluster:
_A : List[str] = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
_A : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(snake_case_,snake_case_ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_A : int = 1
extremes.append(snake_case_ )
return extremes
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
global _shared_dataset
_A : Any = dataset
_A : List[Any] = []
_A : Optional[int] = partial(_find_cluster_extremes_shared,jaccard_threshold=snake_case_ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
snake_case_,snake_case_,),total=len(snake_case_ ),):
extremes_list.append(snake_case_ )
return extremes_list
def lowerCAmelCase_ ( snake_case_,snake_case_ = 0.85 ):
_A : List[Any] = make_duplicate_clusters(snake_case_,snake_case_ )
_A : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
_A : List[str] = {}
_A : Optional[Any] = find_extremes(snake_case_,snake_case_,snake_case_ )
for extremes in extremes_clusters:
for element in extremes:
_A : int = element
_A : Union[str, Any] = duplicate_indices - set(extreme_dict.keys() )
_A : int = dataset.filter(lambda snake_case_,snake_case_ : idx not in remove_indices,with_indices=snake_case_ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_A : int = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
_A : Any = extreme_dict[element["""base_index"""]]["""copies"""]
print(f'''Original dataset size: {len(snake_case_ )}''' )
print(f'''Number of duplicate clusters: {len(snake_case_ )}''' )
print(f'''Files in duplicate cluster: {len(snake_case_ )}''' )
print(f'''Unique files in duplicate cluster: {len(snake_case_ )}''' )
print(f'''Filtered dataset size: {len(snake_case_ )}''' )
return ds_filter, duplicate_clusters
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Dict = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[str]:
lowerCamelCase__ : str = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
lowerCamelCase__ : Optional[Any] = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
lowerCamelCase__ : List[str] = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCamelCase__ : Dict = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCamelCase__ : Tuple = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_UpperCAmelCase )-1}""" )
if "norm" in key:
lowerCamelCase__ : str = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCamelCase__ : Dict = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
lowerCamelCase__ : str = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_UpperCAmelCase )-1}""" )
if "layer_norm1" in key:
lowerCamelCase__ : Optional[int] = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCamelCase__ : Optional[int] = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCamelCase__ : List[Any] = key[key.find('block' ) + len('block' )]
lowerCamelCase__ : int = key.replace(F"""block{idx}""" , F"""block.{int(_UpperCAmelCase )-1}""" )
if "attn.q" in key:
lowerCamelCase__ : Union[str, Any] = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCamelCase__ : Union[str, Any] = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCamelCase__ : Dict = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCamelCase__ : Dict = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCamelCase__ : Any = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCamelCase__ : Dict = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCamelCase__ : Tuple = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCamelCase__ : List[str] = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCamelCase__ : Optional[Any] = key[key.find('linear_c' ) + len('linear_c' )]
lowerCamelCase__ : Dict = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_UpperCAmelCase )-1}""" )
if "bot_conv" in key:
lowerCamelCase__ : str = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
lowerCamelCase__ : Union[str, Any] = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
lowerCamelCase__ : List[Any] = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
lowerCamelCase__ : Optional[int] = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
lowerCamelCase__ : Union[str, Any] = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
lowerCamelCase__ : List[Any] = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
lowerCamelCase__ : str = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
lowerCamelCase__ : Dict = key.replace('module.last_layer_depth' , 'head.head' )
lowerCamelCase__ : str = value
return new_state_dict
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCamelCase__ : Any = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
lowerCamelCase__ : Optional[Any] = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
lowerCamelCase__ : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
lowerCamelCase__ : Optional[int] = kv_bias[: config.hidden_sizes[i]]
lowerCamelCase__ : Any = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCamelCase__ : Dict = kv_bias[config.hidden_sizes[i] :]
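# Illustration of the fused key/value split performed above: the original
# checkpoint stores K and V in one matrix, with the first hidden_size rows
# forming the key projection and the remaining rows the value projection
# (toy tensor, not part of the conversion):
def _demo_split_kv(hidden_size: int = 4):
    kv_weight = torch.randn(2 * hidden_size, hidden_size)
    k_weight = kv_weight[:hidden_size, :]
    v_weight = kv_weight[hidden_size:, :]
    return k_weight.shape, v_weight.shape  # both (hidden_size, hidden_size)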
def SCREAMING_SNAKE_CASE ( ) -> str:
lowerCamelCase__ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ : Tuple = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return image
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=None ) -> Optional[int]:
lowerCamelCase__ : str = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCamelCase__ : Union[str, Any] = GLPNImageProcessor()
# prepare image
lowerCamelCase__ : str = prepare_img()
lowerCamelCase__ : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
lowerCamelCase__ : Any = torch.load(_UpperCAmelCase , map_location=torch.device('cpu' ) )
# rename keys
lowerCamelCase__ : str = rename_keys(_UpperCAmelCase )
# key and value matrices need special treatment
read_in_k_v(_UpperCAmelCase , _UpperCAmelCase )
# create HuggingFace model and load state dict
lowerCamelCase__ : Dict = GLPNForDepthEstimation(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
# forward pass
lowerCamelCase__ : List[str] = model(_UpperCAmelCase )
lowerCamelCase__ : Tuple = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCamelCase__ : List[Any] = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
lowerCamelCase__ : List[str] = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
lowerCamelCase__ : Tuple = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=_UpperCAmelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_UpperCAmelCase , )
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
_UpperCAmelCase : int = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
__lowercase : List[str] = datasets.logging.get_logger(__name__)
__lowercase : List[str] = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
__lowercase : Dict = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
__lowercase : Optional[int] = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
__lowercase : str = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if self.config_name == "default":
logger.warning(
'Using default BLEURT-Base checkpoint for sequence maximum length 128. '
'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').' )
__a : int = 'bleurt-base-128'
if self.config_name.lower() in CHECKPOINT_URLS:
__a : str = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
__a : List[str] = self.config_name.upper()
else:
raise KeyError(
f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
# download the model checkpoint specified by self.config_name and set up the scorer
__a : Optional[int] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
__a : Optional[int] = score.BleurtScorer(os.path.join(__a , __a ) )
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
__a : Union[str, Any] = self.scorer.score(references=__a , candidates=__a )
return {"scores": scores}
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase :
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any]=99 , UpperCAmelCase : str=13 , UpperCAmelCase : List[str]=7 , UpperCAmelCase : str=9 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : str=True , UpperCAmelCase : Any=False , UpperCAmelCase : Union[str, Any]=32 , UpperCAmelCase : List[str]=5 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : Union[str, Any]=37 , UpperCAmelCase : int=8 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Any=0.0_0_2 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[Any]=None , ) -> Union[str, Any]:
lowerCamelCase__ : int = parent
lowerCamelCase__ : Any = batch_size
lowerCamelCase__ : Optional[int] = encoder_seq_length
lowerCamelCase__ : int = decoder_seq_length
# For common tests
lowerCamelCase__ : List[str] = self.decoder_seq_length
lowerCamelCase__ : Optional[int] = is_training
lowerCamelCase__ : List[Any] = use_attention_mask
lowerCamelCase__ : Optional[Any] = use_labels
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : str = d_ff
lowerCamelCase__ : Optional[Any] = relative_attention_num_buckets
lowerCamelCase__ : Any = dropout_rate
lowerCamelCase__ : Any = initializer_factor
lowerCamelCase__ : Union[str, Any] = eos_token_id
lowerCamelCase__ : List[str] = pad_token_id
lowerCamelCase__ : List[str] = decoder_start_token_id
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Optional[Any] = decoder_layers
def A_ ( self : List[Any] ) -> int:
return TaConfig.from_pretrained('google/umt5-base' )
def A_ ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : str=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[Any]=None , ) -> List[str]:
if attention_mask is None:
lowerCamelCase__ : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCamelCase__ : Optional[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCamelCase__ : int = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase )
if decoder_head_mask is None:
lowerCamelCase__ : Dict = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
if cross_attn_head_mask is None:
lowerCamelCase__ : Dict = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def A_ ( self : str ) -> List[str]:
lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCamelCase__ : List[str] = input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Union[str, Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Dict = self.get_config()
lowerCamelCase__ : Tuple = config.num_attention_heads
lowerCamelCase__ : Any = self.prepare_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return config, input_dict
def A_ ( self : Tuple ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def A_ ( self : Optional[int] ) -> List[str]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def A_ ( self : Union[str, Any] ) -> Dict:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def A_ ( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ) -> str:
        model = UMTaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
        result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids )
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def A_ ( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ) -> Optional[int]:
        model = UMTaModel(config=config ).get_decoder().to(torch_device ).eval()
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        output , past_key_values = outputs.to_tuple()
        # create a hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['last_hidden_state']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def A_ ( self , config , input_dict , ) -> Tuple:
        model = UMTaModel(config=config ).to(torch_device ).half().eval()
        output = model(**input_dict )['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
UpperCAmelCase__ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCAmelCase__ = [0.8, 0.9]
    def A_ ( self : Union[str, Any] ) -> List[Any]:
        self.model_tester = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
    def A_ ( self : Tuple ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0] ).to(torch_device )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=True , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
    def A_ ( self : Tuple ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs )
    def A_ ( self : List[Any] ) -> str:
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking = {
            'head_mask': torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required by the T5 model when head_mask is specified
            if name == "head_mask":
                head_masks['decoder_head_mask'] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )
            out = model.generate(
                config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def A_ ( self : Any ) -> int:
        model = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=False , legacy=False )
        input_text = [
            'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
            'No se como puedo <extra_id_0>.',
            'This is the reason why we <extra_id_0> them.',
            'The <extra_id_0> walks in <extra_id_1>, seats',
            'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
        ]
        input_ids = tokenizer(input_text , return_tensors='pt' , padding=True ).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
            '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
            '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
        ]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling , EXPECTED_FILLING )
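# An illustrative sketch (an addition, not part of the test file above) of the
# head-masking semantics the masking test relies on: attention probabilities
# are multiplied element-wise by a per-head mask, so an all-zeros mask silences
# those heads and their attention weights sum to exactly zero.
if __name__ == "__main__":
    import torch

    num_heads, seq_len = 4, 6  # illustrative shapes
    attn_probs = torch.softmax(torch.randn(1, num_heads, seq_len, seq_len), dim=-1)
    head_mask = torch.zeros(num_heads).view(1, num_heads, 1, 1)
    masked = attn_probs * head_mask
    assert masked.sum().item() == 0.0  # mirrors the assertEqual(..., 0.0) above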
"""Rayleigh quotient of a Hermitian matrix."""
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix A."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
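# A companion property check (an addition, not from the original module): for a
# Hermitian matrix the Rayleigh quotient is real and bounded by the extreme
# eigenvalues, which numpy can confirm directly.
if __name__ == "__main__":
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    v = np.array([[1.0], [2.0], [3.0]])
    eigenvalues = np.linalg.eigvalsh(a)  # ascending order for Hermitian input
    r = rayleigh_quotient(a, v).item()
    assert eigenvalues[0] - 1e-9 <= r <= eigenvalues[-1] + 1e-9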
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            F"""Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
    if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , None ) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp
        if len(AcceleratorState._shared_state ) > 0:
            raise ValueError(
                'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
                'your training function. Restart your notebook and make sure no cells initializes an '
                '`Accelerator`.' )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type='TPU' )
        print(F"""Launching a training on {num_processes} TPU cores.""" )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method='fork' )
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print('Launching training on one GPU.' )
        else:
            print('Launching training on one CPU.' )
        function(*args )
    else:
        if num_processes is None:
            raise ValueError(
                'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException
            if len(AcceleratorState._shared_state ) > 0:
                raise ValueError(
                    'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
                    'inside your training function. Restart your notebook and make sure no cells initializes an '
                    '`Accelerator`.' )
            if torch.cuda.is_initialized():
                raise ValueError(
                    'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
                    'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
                    'function.' )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes , master_addr='127.0.0.1' , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type='MULTI_GPU' )
                print(F"""Launching training on {num_processes} GPUs.""" )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
                            'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
                            'Please review your imports and test them when running the `notebook_launcher()` to identify '
                            'which one is problematic.' ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
                print('Launching training on MPS.' )
            elif torch.cuda.is_available():
                print('Launching training on one GPU.' )
            else:
                print('Launching training on CPU.' )
            function(*args )
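# Typical usage from a notebook cell (an illustrative sketch; `training_function`
# and `hyperparameters` are hypothetical user-defined objects):
#
#     notebook_launcher(training_function, args=(hyperparameters,), num_processes=2)
#
# Each forked process calls `training_function`, which is why the errors above
# insist that the `Accelerator` be created inside that function.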
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes
    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
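# Assumed usage of the debug helper above (a sketch; `test_fn` is a
# hypothetical CPU-only function):
#
#     debug_launcher(test_fn, num_processes=2)
#
# which runs `test_fn` in two forked CPU processes with the rendezvous
# environment variables patched in.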
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=3 , _UpperCamelCase=3_2 , _UpperCamelCase=3 , _UpperCamelCase=1_0 , _UpperCamelCase=[1_0, 2_0, 3_0, 4_0] , _UpperCamelCase=[1, 1, 2, 1] , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase="relu" , _UpperCamelCase=3 , _UpperCamelCase=None , ) -> Union[str, Any]:
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : int = embeddings_size
UpperCAmelCase_ : Optional[int] = hidden_sizes
UpperCAmelCase_ : Optional[int] = depths
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : List[Any] = num_labels
UpperCAmelCase_ : Optional[Any] = scope
UpperCAmelCase_ : Any = len(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : str = self.get_config()
return config, pixel_values
def __UpperCAmelCase ( self ) -> Optional[int]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Any = FlaxRegNetModel(config=_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = model(_UpperCamelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : List[Any] = FlaxRegNetForImageClassification(config=_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def __UpperCAmelCase ( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class lowerCamelCase (_snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : List[str] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
_snake_case : int = False
_snake_case : Optional[Any] = False
_snake_case : Optional[int] = False
def __UpperCAmelCase ( self ) -> None:
UpperCAmelCase_ : List[Any] = FlaxRegNetModelTester(self )
UpperCAmelCase_ : Dict = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self ) -> str:
return
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def __UpperCAmelCase ( self ) -> str:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def __UpperCAmelCase ( self ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = model_class(_UpperCamelCase )
UpperCAmelCase_ : Any = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Dict = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> str:
def check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = model_class(_UpperCamelCase )
UpperCAmelCase_ : str = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
UpperCAmelCase_ : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ : Tuple = self.model_tester.num_stages
self.assertEqual(len(_UpperCamelCase ) , expected_num_stages + 1 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : str = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : Dict = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = model_class(_UpperCamelCase )
@jax.jit
def model_jitted(_UpperCamelCase , **_UpperCamelCase ):
return model(pixel_values=_UpperCamelCase , **_UpperCamelCase )
with self.subTest('JIT Enabled' ):
UpperCAmelCase_ : Optional[int] = model_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCAmelCase_ : int = model_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    '''Loads the standard COCO test fixture image.'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_flax
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ) -> Optional[Any]:
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Tuple = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : List[Any] = prepare_img()
UpperCAmelCase_ : Any = image_processor(images=_UpperCamelCase , return_tensors='np' )
UpperCAmelCase_ : int = model(**_UpperCamelCase )
# verify the logits
UpperCAmelCase_ : Union[str, Any] = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
UpperCAmelCase_ : Tuple = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) )
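# The "JIT Enabled"/"JIT Disabled" pattern above, distilled into a standalone
# sketch (an addition, independent of the RegNet classes; `f` stands in for the
# model call and requires jax to be installed):
if __name__ == "__main__":
    def f(x):
        return jnp.tanh(x) * 2.0

    x = jnp.ones((2, 3))
    jitted_output = jax.jit(f)(x)
    with jax.disable_jit():
        eager_output = f(x)
    assert jitted_output.shape == eager_output.shape  # same check as the test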
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = 42
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase ):
@register_to_config
    def __init__( self , sample_size: int = 65536 , sample_rate: Optional[int] = None , in_channels: int = 2 , out_channels: int = 2 , extra_in_channels: int = 0 , time_embedding_type: str = "fourier" , flip_sin_to_cos: bool = True , use_timestep_embedding: bool = False , freq_shift: float = 0.0 , down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type: str = "UNetMidBlock1D" , out_block_type: str = None , block_out_channels: Tuple[int] = (32, 32, 64) , act_fn: str = None , norm_num_groups: int = 8 , layers_per_block: int = 1 , downsample_each_block: bool = False , ) -> None:
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=False , log=False , flip_sin_to_cos=flip_sin_to_cos )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=flip_sin_to_cos , downscale_freq_shift=freq_shift )
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim , time_embed_dim=time_embed_dim , act_fn=act_fn , out_dim=block_out_channels[0] , )
        self.down_blocks = nn.ModuleList([] )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels ) - 1
            down_block = get_down_block(
                down_block_type , num_layers=layers_per_block , in_channels=input_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(down_block )
        # mid
        self.mid_block = get_mid_block(
            mid_block_type , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=layers_per_block , add_downsample=downsample_each_block , )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types ) - 1 else final_upsample_channels
            )
            is_final_block = i == len(up_block_types ) - 1
            up_block = get_up_block(
                up_block_type , num_layers=layers_per_block , in_channels=prev_output_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        self.out_block = get_out_block(
            out_block_type=out_block_type , num_groups_out=num_groups_out , embed_dim=block_out_channels[0] , out_channels=out_channels , act_fn=act_fn , fc_dim=block_out_channels[-1] // 4 , )
    def forward( self , sample: torch.FloatTensor , timestep: Union[torch.Tensor, float, int] , return_dict: bool = True , ) -> Union[UNetaDOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(sample.device )
        timestep_embed = self.time_proj(timesteps )
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed )
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample , res_samples = downsample_block(hidden_states=sample , temb=timestep_embed )
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample , timestep_embed )
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks ):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample , res_hidden_states_tuple=res_samples , temb=timestep_embed )
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample , timestep_embed )
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=sample )
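# Rough usage sketch (an assumption: this class mirrors diffusers' `UNet1DModel`;
# the shapes below are illustrative):
#
#     import torch
#     from diffusers import UNet1DModel
#
#     model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
#     sample = torch.randn(1, 2, 65536)          # (batch, channels, length)
#     out = model(sample, timestep=10).sample    # denoised sample, same layout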
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: maximum revenue from cutting a rod of length n."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) variant."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    """Helper that threads the memo table `max_rev` through the recursion."""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) variant."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """Validates that n is non-negative and prices covers every piece length."""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
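# An alternative memoization sketch (an addition, not part of the original
# module): functools.lru_cache gives the same top-down O(n^2) behaviour without
# threading an explicit `max_rev` list through the recursion.
from functools import lru_cache


def cut_rod_cached(n: int, prices: tuple) -> int:
    @lru_cache(maxsize=None)
    def best(m: int) -> int:
        if m == 0:
            return 0
        return max(prices[i - 1] + best(m - i) for i in range(1, m + 1))

    return best(n)


if __name__ == "__main__":
    assert cut_rod_cached(6, (6, 10, 12, 15, 20, 23)) == 36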
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Returns the knight moves from `position` that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Returns True when every square of the board has been visited."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(
    board: list[list[int]], pos: tuple[int, int], curr: int
) -> bool:
    """Recursive backtracking step: try every legal knight move from `pos`."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Finds an open knight's tour on an n x n board, trying every start square."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
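# Quick demonstration (an addition, not from the original module): a 5x5 board
# admits an open knight's tour, so the loop below would print a matrix whose
# entries 1..25 trace the knight's path.
#
#     for row in open_knight_tour(5):
#         print(row)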
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : List[str] , A : List[Any] , A : List[str]=3 , A : List[str]=32 , A : Optional[int]=3 , A : str=10 , A : Any=[10, 20, 30, 40] , A : int=[1, 1, 2, 1] , A : int=True , A : Dict=True , A : Optional[int]="relu" , A : Optional[int]=3 , A : Tuple=None , ):
_UpperCAmelCase : Tuple = parent
_UpperCAmelCase : Union[str, Any] = batch_size
_UpperCAmelCase : List[Any] = image_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : List[str] = embeddings_size
_UpperCAmelCase : Tuple = hidden_sizes
_UpperCAmelCase : Optional[int] = depths
_UpperCAmelCase : Tuple = is_training
_UpperCAmelCase : str = use_labels
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : Optional[int] = num_labels
_UpperCAmelCase : Dict = scope
_UpperCAmelCase : List[Any] = len(A )
def _A ( self : int ):
_UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : Optional[Any] = None
if self.use_labels:
_UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def _A ( self : str ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _A ( self : Optional[int] , A : Any , A : int , A : str ):
_UpperCAmelCase : int = TFRegNetModel(config=A )
_UpperCAmelCase : Optional[int] = model(A , training=A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _A ( self : Union[str, Any] , A : str , A : Optional[Any] , A : Optional[int] ):
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : str = TFRegNetForImageClassification(A )
_UpperCAmelCase : Tuple = model(A , labels=A , training=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _A ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class lowerCamelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: int = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
__UpperCamelCase: Union[str, Any] = (
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
__UpperCamelCase: Dict = False
__UpperCamelCase: Optional[int] = False
__UpperCamelCase: Any = False
__UpperCamelCase: Optional[Any] = False
__UpperCamelCase: Tuple = False
def _A ( self : Any ):
_UpperCAmelCase : Any = TFRegNetModelTester(self )
_UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , has_text_modality=A )
def _A ( self : List[Any] ):
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def _A ( self : List[Any] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def _A ( self : Optional[Any] ):
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def _A ( self : str ):
pass
def _A ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = model_class(A )
_UpperCAmelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
_UpperCAmelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A )
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _A ( self : List[Any] ):
def check_hidden_states_output(A : Any , A : Any , A : Optional[Any] ):
_UpperCAmelCase : Union[str, Any] = model_class(A )
_UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(A , A ) , training=A )
_UpperCAmelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : List[str] = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCAmelCase : List[str] = layer_type
_UpperCAmelCase : Dict = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : int = True
check_hidden_states_output(A , A , A )
def _A ( self : Tuple ):
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(A : Optional[int] , A : Union[str, Any] , A : Optional[Any] , A : str={} ):
_UpperCAmelCase : Tuple = model(A , return_dict=A , **A )
_UpperCAmelCase : Any = model(A , return_dict=A , **A ).to_tuple()
def recursive_check(A : Tuple , A : Tuple ):
if isinstance(A , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(A , A ):
recursive_check(A , A )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(A , A ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(A , A )
for model_class in self.all_model_classes:
_UpperCAmelCase : Tuple = model_class(A )
_UpperCAmelCase : Optional[Any] = self._prepare_for_class(A , A )
_UpperCAmelCase : str = self._prepare_for_class(A , A )
check_equivalence(A , A , A )
_UpperCAmelCase : Optional[int] = self._prepare_for_class(A , A , return_labels=A )
_UpperCAmelCase : Optional[int] = self._prepare_for_class(A , A , return_labels=A )
check_equivalence(A , A , A )
_UpperCAmelCase : Optional[Any] = self._prepare_for_class(A , A )
_UpperCAmelCase : Tuple = self._prepare_for_class(A , A )
check_equivalence(A , A , A , {"output_hidden_states": True} )
_UpperCAmelCase : int = self._prepare_for_class(A , A , return_labels=A )
_UpperCAmelCase : Union[str, Any] = self._prepare_for_class(A , A , return_labels=A )
check_equivalence(A , A , A , {"output_hidden_states": True} )
def _A ( self : Tuple ):
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def _A ( self : List[str] ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Any = TFRegNetModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    """Loads the standard COCO test fixture image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _A ( self : Optional[int] ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _A ( self : List[str] ):
_UpperCAmelCase : Dict = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : int = prepare_img()
_UpperCAmelCase : Optional[Any] = image_processor(images=A , return_tensors="tf" )
# forward pass
_UpperCAmelCase : Optional[Any] = model(**A , training=A )
# verify the logits
_UpperCAmelCase : Union[str, Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , A )
_UpperCAmelCase : Optional[Any] = tf.constant([-0.4_180, -1.5_051, -3.4_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , A , atol=1E-4 )
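# The recursive tuple/dict comparison used in the equivalence check above,
# distilled into a standalone sketch (an addition, independent of the RegNet
# classes; requires tensorflow):
if __name__ == "__main__":
    def tuples_equal(tuple_object, dict_object):
        if isinstance(tuple_object, (list, tuple)):
            return all(tuples_equal(t, d) for t, d in zip(tuple_object, dict_object))
        if tuple_object is None:
            return True
        return bool(tf.reduce_all(tf.equal(tuple_object, dict_object)))

    assert tuples_equal((tf.ones(2), (tf.zeros(3), None)), (tf.ones(2), (tf.zeros(3), None)))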
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx: int) -> list:
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx: int, cnt: int) -> list:
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx: int) -> list:
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def final() -> list:
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('cpu' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder )
    image_processor.save_pretrained(pytorch_dump_folder )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_84,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase : List[str] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
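# The conversion above reduces to one pattern: build (new_name, old_name) pairs,
# then copy tensors from the original checkpoint into a fresh state dict keyed
# by the new names. In miniature (the tensor names below are hypothetical):
#
#     rename = [("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
#                "stage0.patch_embed.proj.weight")]
#     converted = OrderedDict((new, original_weights[old]) for new, old in rename)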
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : int ) -> None:
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
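# The shim above follows a common deprecation pattern: subclass the replacement,
# emit a warning at construction time, and defer everything else to the parent.
# Generically (all names below are hypothetical):
class _NewProcessor:
    def __init__(self, *args, **kwargs):
        self.init_args = args


class _OldFeatureExtractor(_NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            '_OldFeatureExtractor is deprecated; use _NewProcessor instead.' ,
            FutureWarning , )
        super().__init__(*args , **kwargs )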
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('test' )
    else:
        parser = argparse.ArgumentParser('Accelerate test command' )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"""--config_file={args.config_file} {script_name}"""
    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!' )
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
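# Usage sketch (illustrative; the config path is hypothetical):
#
#   parser = test_command_parser()
#   args = parser.parse_args(['--config_file', 'my_accelerate_config.yaml'])
#   test_command(args)  # runs test_utils/scripts/test_script.py via `accelerate-launch`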
| 50
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase ):
    def check_results_dict_not_empty(self , results ) -> None:
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
                result = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def test_inference_no_configs(self ) -> None:
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_only_pretrain(self ) -> None:
        MODEL_ID = '''sgugger/tiny-distilbert-classification'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , only_pretrain_model=True , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_torchscript(self ) -> None:
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , torchscript=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def test_inference_fp16(self ) -> None:
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , fp16=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_model_no_architectures(self ) -> None:
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs(self ) -> None:
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
    def test_train_no_configs_fp16(self ) -> None:
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , fp16=True , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_with_configs(self ) -> None:
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_encoder_decoder_with_configs(self ) -> None:
        MODEL_ID = '''sshleifer/tinier_bart'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_with_configs(self ) -> None:
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_encoder_decoder_with_configs(self ) -> None:
        MODEL_ID = '''sshleifer/tinier_bart'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_save_csv_files(self ) -> None:
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=True , inference=True , save_to_csv=True , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(tmp_dir , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(tmp_dir , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(tmp_dir , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(tmp_dir , '''train_time.csv''' ) , env_info_csv_file=os.path.join(tmp_dir , '''env.csv''' ) , multi_process=False , )
            benchmark = PyTorchBenchmark(benchmark_args )
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir , '''inf_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , '''train_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , '''inf_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , '''train_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , '''env.csv''' ) ).exists() )
    def test_trace_memory(self ) -> None:
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        def _check_summary_is_not_empty(summary ):
            self.assertTrue(hasattr(summary , '''sequential''' ) )
            self.assertTrue(hasattr(summary , '''cumulative''' ) )
            self.assertTrue(hasattr(summary , '''current''' ) )
            self.assertTrue(hasattr(summary , '''total''' ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=True , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , '''log.txt''' ) , log_print=True , trace_memory_line_by_line=True , multi_process=False , )
            benchmark = PyTorchBenchmark(benchmark_args )
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(tmp_dir , '''log.txt''' ) ).exists() )
| 33
|
def solution(limit: int = 100_0000 ) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
if __name__ == "__main__":
print(F"""{solution() = }""")
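# Validation sketch (helper added for illustration only, not part of the Project
# Euler solution above). Substituting x, y, z = a + d, a, a - d gives
# n = (a + d)**2 - a**2 - (a - d)**2 = a * (4*d - a), with d < a < 4*d, so a brute
# force over (a, d) can confirm the frequency counts for small limits:
def _brute_force_count(limit: int = 2000 ) -> int:
    frequency = [0] * limit
    for a in range(1 , limit ):
        for d in range(a // 4 + 1 , a ):  # enforces a < 4*d and a > d
            n = a * (4 * d - a)
            if n >= limit:  # n grows with d, so we can stop early
                break
            frequency[n] += 1
    return sum(1 for x in frequency[1:] if x == 10 )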
| 50
| 0
|
'''simple docstring'''
def is_pentagonal(n: int ) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(limit: int = 5000 ) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
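# Quick sanity check (illustrative): the first pentagonal numbers are
# 1, 5, 12, 22, 35, ... and is_pentagonal should accept exactly those:
#
#   assert all(is_pentagonal((i * (3 * i - 1)) // 2 ) for i in range(1 , 100 ))
#   assert not is_pentagonal(6 )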
| 34
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 5_12,
"""google/realm-cc-news-pretrained-encoder""": 5_12,
"""google/realm-cc-news-pretrained-scorer""": 5_12,
"""google/realm-cc-news-pretrained-openqa""": 5_12,
"""google/realm-orqa-nq-openqa""": 5_12,
"""google/realm-orqa-nq-reader""": 5_12,
"""google/realm-orqa-wq-openqa""": 5_12,
"""google/realm-orqa-wq-reader""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self , text , **kwargs ) -> BatchEncoding:
        # Always pad to max_length so candidates can be stacked into one batch tensor.
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('text_pair' , None )
        return_tensors = kwargs.pop('return_tensors' , None )
        output_data = {
            'input_ids': [],
            'attention_mask': [],
            'token_type_ids': [],
        }
        for idx, candidate_text in enumerate(batch_text ):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs )
            encoded_input_ids = encoded_candidates.get('input_ids' )
            encoded_attention_mask = encoded_candidates.get('attention_mask' )
            encoded_token_type_ids = encoded_candidates.get('token_type_ids' )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids )
        output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
        return BatchEncoding(output_data , tensor_type=return_tensors )
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
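# Usage sketch (the checkpoint name comes from the pretrained map above; shapes are
# illustrative): batch_encode_candidates pads every candidate to the same max_length
# so the result can be stacked into a (batch, num_candidates, seq_len) tensor.
#
#   tokenizer = RealmTokenizerFast.from_pretrained('google/realm-cc-news-pretrained-encoder' )
#   text = [['Hello world!', 'Nice to meet you!'], ['The cute cat.', 'The adorable dog.']]
#   batch = tokenizer.batch_encode_candidates(text , max_length=10 , return_tensors='pt' )
#   batch['input_ids'].shape  # -> torch.Size([2, 2, 10])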
| 50
| 0
|
'''simple docstring'''
import operator
def strand_sort(arr: list , reverse: bool = False , solution: list = None ) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
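# Property-test sketch (illustrative): strand_sort should agree with sorted() on
# arbitrary input. Note that the implementation consumes its input list via pop(),
# so a copy is passed:
#
#   import random
#   data = [random.randint(0, 99) for _ in range(50)]
#   assert strand_sort(list(data)) == sorted(data)
#   assert strand_sort(list(data), reverse=True) == sorted(data, reverse=True)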
| 35
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
_UpperCAmelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass(frozen=True )
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True )
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset ):
    features: List[InputFeatures]
    def __init__(self , data_dir: str , tokenizer: PreTrainedTokenizer , task: str , max_seq_length: Optional[int] = None , overwrite_cache=False , evaluate: bool = False , ):
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir , 'cached_{}_{}_{}_{}'.format(
                'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not overwrite_cache:
                logger.info(F"""Loading features from cached file {cached_features_file}""" )
                self.features = torch.load(cached_features_file )
            else:
                logger.info(F"""Creating features from dataset file at {data_dir}""" )
                examples = (
                    processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                )
                logger.info('Training examples: %s' , len(examples ) )
                self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                logger.info('Saving features into cached file %s' , cached_features_file )
                torch.save(self.features , cached_features_file )
    def __len__(self ):
        return len(self.features )
    def __getitem__(self , i ) -> InputFeatures:
        return self.features[i]
    def get_labels(self ):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    features: List[InputFeatures]
    def __init__(self , data_dir: str , tokenizer: PreTrainedTokenizer , task: str , max_seq_length: Optional[int] = 128 , overwrite_cache=False , evaluate: bool = False , ):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
        self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
                    logger.info('Writing example %d of %d' % (ex_index, len(examples )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
        self.dataset = tf.data.Dataset.from_generator(
            gen , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
    def get_dataset(self ):
        return self.dataset
    def __len__(self ) -> int:
        return len(self.features )
    def __getitem__(self , i ) -> InputFeatures:
        return self.features[i]
    def get_labels(self ):
        return self.label_list
class HansProcessor(DataProcessor ):
    def get_train_examples(self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , 'heuristics_train_set.txt' ) ) , 'train' )
    def get_dev_examples(self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
    def get_labels(self ):
        return ["contradiction", "entailment", "neutral"]
    def _create_examples(self , lines , set_type ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith('ex' ) else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features(examples: List[InputExample] , label_list: List[str] , max_length: int , tokenizer: PreTrainedTokenizer , ) -> List[InputFeatures]:
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='convert examples to features' ):
        if ex_index % 1_0000 == 0:
            logger.info('Writing example %d' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='max_length' , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info('*** Example ***' )
        logger.info(F"""guid: {example}""" )
        logger.info(F"""features: {features[i]}""" )
    return features
hans_tasks_num_labels = {
    """hans""": 3,
}
hans_processors = {
    """hans""": HansProcessor,
}
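# Usage sketch (paths are hypothetical; HansDataset and HansProcessor are defined above):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased' )
#   dataset = HansDataset(data_dir='./hans' , tokenizer=tokenizer , task='hans' , max_seq_length=128 )
#   features = dataset[0]  # an InputFeatures with input_ids, attention_mask, label and pairID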
| 50
| 0
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_snake_case = ["text", "image", "audio"]
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
inputs.append(create_inputs(_lowerCamelCase ) )
else:
raise ValueError(F"Invalid type requested: {input_type}" )
return inputs
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
for output in outputs:
if isinstance(_lowerCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(_lowerCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(_lowerCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F"Invalid output: {output}" )
return output_types
@is_tool_test
class UpperCAmelCase_ :
def snake_case__ ( self):
'''simple docstring'''
self.assertTrue(hasattr(self.tool, "inputs"))
self.assertTrue(hasattr(self.tool, "outputs"))
_lowerCAmelCase : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input, __a):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
_lowerCAmelCase : str = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = create_inputs(self.tool.inputs)
_lowerCAmelCase : Dict = self.tool(*__a)
# There is a single output
if len(self.tool.outputs) == 1:
_lowerCAmelCase : Dict = [outputs]
self.assertListEqual(output_types(__a), self.tool.outputs)
def snake_case__ ( self):
'''simple docstring'''
self.assertTrue(hasattr(self.tool, "description"))
self.assertTrue(hasattr(self.tool, "default_checkpoint"))
self.assertTrue(self.tool.description.startswith("This is a tool that"))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = create_inputs(self.tool.inputs)
_lowerCAmelCase : Any = self.tool(*__a)
if not isinstance(__a, __a):
_lowerCAmelCase : str = [outputs]
self.assertEqual(len(__a), len(self.tool.outputs))
for output, output_type in zip(__a, self.tool.outputs):
_lowerCAmelCase : Any = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(__a, __a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = create_inputs(self.tool.inputs)
_lowerCAmelCase : Tuple = []
for _input, input_type in zip(__a, self.tool.inputs):
if isinstance(__a, __a):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
_lowerCAmelCase : Dict = self.tool(*__a)
if not isinstance(__a, __a):
_lowerCAmelCase : Any = [outputs]
self.assertEqual(len(__a), len(self.tool.outputs))
| 36
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : Optional[Any] = """▁"""
_UpperCAmelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ):
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<unk>' )
        self.assertEqual(vocab_keys[1] , '<s>' )
        self.assertEqual(vocab_keys[-1] , '<pad>' )
        self.assertEqual(len(vocab_keys ) , 1002 )
    def test_vocab_size(self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_full_tokenizer(self ):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer(self ):
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
    def test_tokenization_base_easy_symbols(self ):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols(self ):
        symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self ):
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ' '.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='pt' , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=False )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_tokenizer_integration(self ):
# fmt: off
lowerCamelCase__ : Any = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 50
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""" )
    if not scores:
        raise ValueError("""Scores cannot be empty""" )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
def main():
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print(f"""Optimal value : {minimax(0 , 0 , True , scores , height )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
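# Worked example (illustrative): with leaf scores [3, 5, 2, 9] and height = log2(4) = 2,
# the minimizing children evaluate to min(3, 5) = 3 and min(2, 9) = 2, so the
# maximizing root returns max(3, 2) = 3:
#
#   assert minimax(0 , 0 , True , [3, 5, 2, 9] , 2 ) == 3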
| 37
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_UpperCAmelCase : str = pytest.mark.integration
@require_faiss
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : List[Any] ) -> Union[str, Any]:
lowerCamelCase__ : List[Any] = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(UpperCAmelCase ) for x in np.arange(30 ).tolist()]} )
return dset
def A_ ( self : Optional[Any] ) -> Optional[int]:
import faiss
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
lowerCamelCase__ : List[Any] = dset.map(
lambda UpperCAmelCase , UpperCAmelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=UpperCAmelCase , keep_in_memory=UpperCAmelCase )
lowerCamelCase__ : Tuple = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase__ , lowerCamelCase__ : str = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def A_ ( self : Union[str, Any] ) -> int:
import faiss
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : List[str] ) -> Tuple:
import faiss
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase__ , lowerCamelCase__ : str = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : Any ) -> Optional[Any]:
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(UpperCAmelCase , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def A_ ( self : Dict ) -> Dict:
from elasticsearch import Elasticsearch
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase__ : List[Any] = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCamelCase__ : int = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCamelCase__ : List[str] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=UpperCAmelCase )
lowerCamelCase__ , lowerCamelCase__ : Dict = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : Any ) -> Dict:
import faiss
lowerCamelCase__ : Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase__ : int = np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Any = 1
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = index.search(UpperCAmelCase )
self.assertRaises(UpperCAmelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase__ : str = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase__ , lowerCamelCase__ : Dict = index.search_batch(UpperCAmelCase )
self.assertRaises(UpperCAmelCase , index.search_batch , queries[0] )
lowerCamelCase__ : str = [scores[0] for scores in total_scores]
lowerCamelCase__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , UpperCAmelCase )
def A_ ( self : List[Any] ) -> List[Any]:
import faiss
lowerCamelCase__ : Any = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase__ : Tuple = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(UpperCAmelCase ):
lowerCamelCase__ : List[str] = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def A_ ( self : List[str] ) -> Optional[int]:
import faiss
lowerCamelCase__ : Optional[Any] = faiss.IndexFlat(5 )
lowerCamelCase__ : int = FaissIndex(custom_index=UpperCAmelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def A_ ( self : Any ) -> Optional[int]:
import faiss
lowerCamelCase__ : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase__ : List[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase__ : List[str] = np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ , lowerCamelCase__ : str = index.search(UpperCAmelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs ):
import faiss
lowerCamelCase__ : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase__ : Optional[int] = 'index.faiss'
lowerCamelCase__ : Optional[Any] = F"""mock://{index_name}"""
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCamelCase__ : Tuple = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCamelCase__ : Optional[int] = np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Dict = 1
lowerCamelCase__ , lowerCamelCase__ : str = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : Dict ) -> List[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase__ : Any = Elasticsearch()
lowerCamelCase__ : Tuple = {'acknowledged': True}
lowerCamelCase__ : Tuple = ElasticSearchIndex(es_client=UpperCAmelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCamelCase__ : Optional[int] = 'foo'
lowerCamelCase__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = index.search(UpperCAmelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase__ : Any = 'foo'
lowerCamelCase__ : List[str] = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase__ , lowerCamelCase__ : Tuple = index.search(UpperCAmelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase__ : List[str] = ['foo', 'bar', 'foobar']
lowerCamelCase__ : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase__ , lowerCamelCase__ : str = index.search_batch(UpperCAmelCase )
lowerCamelCase__ : List[str] = [scores[0] for scores in total_scores]
lowerCamelCase__ : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase )
# batched queries with timeout
lowerCamelCase__ : str = ['foo', 'bar', 'foobar']
lowerCamelCase__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = index.search_batch(UpperCAmelCase , request_timeout=30 )
lowerCamelCase__ : Optional[Any] = [scores[0] for scores in total_scores]
lowerCamelCase__ : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase )
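# Usage sketch distilled from the tests above (illustrative):
#
#   import numpy as np
#   from datasets.search import FaissIndex
#   index = FaissIndex(string_factory='Flat' )
#   index.add_vectors(np.eye(5 , dtype=np.float32 ) )
#   scores, indices = index.search(np.ones(5 , dtype=np.float32 ) )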
| 50
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase ):
    def __init__(self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self ):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp(self ):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
def _A ( self : List[str] ):
return self.image_proc_tester.prepare_image_processor_dict()
def _A ( self : int ):
UpperCamelCase :List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """size""" ) )
def _A ( self : Optional[int] ):
pass
def _A ( self : str ):
# Initialize image_processor
UpperCamelCase :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase :Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
UpperCamelCase :List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase :List[Any] = image_processor(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def _A ( self : Union[str, Any] ):
# Initialize image_processor
UpperCamelCase :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase :List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
UpperCamelCase :Dict = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase :Tuple = image_processor(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def _A ( self : List[Any] ):
# Initialize image_processor
UpperCamelCase :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase :Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase :List[Any] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase :str = image_processor(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 38
|
def is_sum_subset(arr: list , required_sum: int ) -> bool:
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
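# Example (illustrative): {4, 5} sums to 9, while no subset of the array sums to 30
# (the largest subset sum without 34 is 26, and any subset containing 34 exceeds 30):
#
#   assert is_sum_subset([3, 34, 4, 12, 5, 2] , 9 )
#   assert not is_sum_subset([3, 34, 4, 12, 5, 2] , 30 )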
| 50
| 0
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_a = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
"""simple docstring"""
def __init__( self , **UpperCAmelCase ):
"""simple docstring"""
super().__init__(**UpperCAmelCase )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(UpperCAmelCase )
def __call__( self , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
if "text_queries" in kwargs:
_UpperCAmelCase = kwargs.pop('text_queries' )
if isinstance(UpperCAmelCase , (str, Image.Image) ):
_UpperCAmelCase = {'image': image, 'candidate_labels': candidate_labels}
else:
_UpperCAmelCase = image
_UpperCAmelCase = super().__call__(UpperCAmelCase , **UpperCAmelCase )
return results
def UpperCamelCase ( self , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = {}
if "threshold" in kwargs:
_UpperCAmelCase = kwargs['threshold']
if "top_k" in kwargs:
_UpperCAmelCase = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = load_image(inputs['image'] )
_UpperCAmelCase = inputs['candidate_labels']
if isinstance(UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = candidate_labels.split(',' )
_UpperCAmelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(UpperCAmelCase ):
_UpperCAmelCase = self.tokenizer(UpperCAmelCase , return_tensors=self.framework )
_UpperCAmelCase = self.image_processor(UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = model_inputs.pop('target_size' )
_UpperCAmelCase = model_inputs.pop('candidate_label' )
_UpperCAmelCase = model_inputs.pop('is_last' )
_UpperCAmelCase = self.model(**UpperCAmelCase )
_UpperCAmelCase = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase=0.1 , UpperCAmelCase=None ):
"""simple docstring"""
_UpperCAmelCase = []
for model_output in model_outputs:
_UpperCAmelCase = model_output['candidate_label']
_UpperCAmelCase = BaseModelOutput(UpperCAmelCase )
_UpperCAmelCase = self.image_processor.post_process_object_detection(
outputs=UpperCAmelCase , threshold=UpperCAmelCase , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
_UpperCAmelCase = outputs['scores'][index].item()
_UpperCAmelCase = self._get_bounding_box(outputs['boxes'][index][0] )
_UpperCAmelCase = {'score': score, 'label': label, 'box': box}
results.append(UpperCAmelCase )
_UpperCAmelCase = sorted(UpperCAmelCase , key=lambda UpperCAmelCase : x["score"] , reverse=UpperCAmelCase )
if top_k:
_UpperCAmelCase = results[:top_k]
return results
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist()
_UpperCAmelCase = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
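# Minimal usage sketch (assumes the standard `transformers` pipeline factory;
# the checkpoint name below is illustrative, not prescribed by this file):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("street.jpg", candidate_labels=["car", "bicycle", "person"])
#   # -> [{"score": ..., "label": "car", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]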
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool token embeddings over the attention mask, then project.
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
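# Usage sketch (assumes an XLM-R tokenizer matching the loaded checkpoint;
# the tokenizer name is illustrative):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large")
#   batch = tokenizer(["a photo of a cat"], return_tensors="pt")
#   projected, token_embs = model(batch["input_ids"], batch["attention_mask"])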
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    """Configuration class for LXMERT models."""

    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(F"""{solution() = }""")
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    # Branch 1: exclude the element at `index`.
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: include the element at `index`.
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
_UpperCAmelCase : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
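# For seq = ["A", "B", "C"] the call above prints all 8 subsequences,
# starting from [] (all elements excluded) and ending with ["A", "B", "C"],
# following the exclude-then-include branching of create_state_space_tree.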
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase : Union[str, Any] = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Dict = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
_UpperCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
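# Usage sketch (checkpoint name taken from the URL map above; output shown
# schematically):
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   # source ids are wrapped as [eng_Latn] ... [eos] (or ... [eos][eng_Latn] in legacy mode)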
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation from MIDI-like note tokens via spectrogram diffusion."""

    _optional_components = ["melgan"]

    def __init__(self, notes_encoder, continuous_encoder, decoder, scheduler, melgan) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
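# Example invocation (script and file names are placeholders):
#   python convert_m2m100_checkpoint.py /path/to/model.pt ./m2m100-converted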
"""simple docstring"""
def check_bouncy(n: int) -> bool:
    """A number is bouncy if its digits are neither fully increasing nor fully decreasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Nearest-neighbor 2x upsampling followed by a 3x3 convolution.
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Add the time embedding, broadcast over the spatial dimensions.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
SCREAMING_SNAKE_CASE__ = {"allegro/herbert-base-cased": 514}
SCREAMING_SNAKE_CASE__ = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
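# Usage sketch (checkpoint name taken from the URL map above):
#
#   tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#   ids = tokenizer("Kot siedzi na macie.")["input_ids"]
#   # special tokens are laid out as <s> ... </s> via build_inputs_with_special_tokens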
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover via a maximal matching."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the first odd composite that cannot be written as prime + 2*i*i."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
# Imports
import numpy as np
class IndexCalculation:
    """Vegetation index calculations from red/green/blue/red-edge/NIR band matrices."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
    def arv12(self):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci(self):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
    def cvi(self):
return self.nir * (self.red / (self.green**2))
    def gli(self):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
    def ndvi(self):
return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self):
return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi(self):
return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi(self):
return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
    def grndvi(self):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
    def rbndvi(self):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi(self):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
    def atsavi(self, x=0.08, a=1.22, b=0.03):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
    def bwdrvi(self):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green(self):
return (self.nir / self.green) - 1
    def ci_rededge(self):
return (self.nir / self.redEdge) - 1
    def ci(self):
return (self.red - self.blue) / self.red
    def ctvi(self):
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def gdvi(self):
return self.nir - self.green
    def evi(self):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def _lowercase ( self , UpperCamelCase__=0.16 ) -> Any:
return (self.nir - self.green) / (self.nir + self.green + y)
def _lowercase ( self , UpperCamelCase__=0.5 ) -> Tuple:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _lowercase ( self ) -> List[Any]:
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def _lowercase ( self , UpperCamelCase__=None , UpperCamelCase__=None ) -> int:
return (self.nir - b) / (a * self.red)
def _lowercase ( self ) -> Dict:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _lowercase ( self ) -> Union[str, Any]:
return (self.red + self.green + self.blue) / 30.5
def _lowercase ( self ) -> int:
return self.nir / self.red
def _lowercase ( self ) -> List[Any]:
return (self.rvi() - 1) / (self.rvi() + 1)
def _lowercase ( self ) -> Optional[Any]:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _lowercase ( self ) -> List[Any]:
return self.green / (self.nir + self.red + self.green)
def _lowercase ( self ) -> int:
return self.nir / (self.nir + self.red + self.green)
def _lowercase ( self ) -> Tuple:
return self.red / (self.nir + self.red + self.green)
def _lowercase ( self ) -> Optional[int]:
return (self.green - self.red) / (self.green + self.red)
def _lowercase ( self ) -> Any:
return (self.red - self.green) / (self.red + self.green)
def _lowercase ( self ) -> List[str]:
lowerCamelCase : Union[str, Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCamelCase : Union[str, Any] = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _lowercase ( self ) -> int:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _lowercase ( self ) -> Optional[int]:
return self.nir / self.red
def _lowercase ( self ) -> Optional[Any]:
return (self.ndvi() + 0.5) ** (1 / 2)
def _lowercase ( self ) -> Dict:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
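# A minimal usage sketch (commented out, illustrative only). It assumes the
# enclosing class exposes `set_matricies` as called above; the band arrays are
# dummy data and `IndexCalculation` is a hypothetical name for the class.
#
#   import numpy as np
#   red = green = blue = red_edge = nir = np.random.uniform(0.01, 1.0, (4, 4))
#   cl = IndexCalculation()
#   print(cl.calculation("NDVI", red=red, nir=nir))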
| 48
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(config, state_dict):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    # prepare an image on which to verify the converted model
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(config, state_dict)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
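# Example invocation (hypothetical script name and local paths, for illustration only):
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti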
| 50
| 0
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda x: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda x: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
  <head>
    <style>
      .img-container {
        padding-left: 90px;
        padding-right: 90px;
        padding-top: 50px;
        padding-bottom: 50px;
        background-color: #f0f3f9;
      }
    </style>
  </head>
  <body>
    <span class="img-container"> <!-- Inline parent element -->
      %s
    </span>
  </body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 49
|
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )
            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 50
| 0
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False
logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input and eventually return corresponding token ids:
        - **text_input**: String to tokenize
        - **return_ids**: Boolean flag indicating if the tokens have to be converted to their integer mapping.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    async def forward(self, inputs=Body(None, embed=True)):
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
| 51
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    # Detect the notebook environment (Kaggle kernel or Google Colab).
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
        function(*args)
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
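# A minimal usage sketch for `notebook_launcher`, assuming a `training_loop`
# function defined in the same notebook (the name and arguments are illustrative):
#
#   def training_loop(lr):
#       ...  # build the Accelerator and train here
#
#   notebook_launcher(training_loop, args=(1e-4,), num_processes=2, mixed_precision="fp16")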
| 50
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list,
    output_size: tuple,
    scale_range: tuple,
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
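# Annotation format note (illustrative dummy data, not shipped with this script):
# each label file read by `get_dataset` is expected to hold YOLO-style lines of
# the form `class x_center y_center width height`, with coordinates normalized
# to [0, 1], e.g.:
#
#   0 0.25 0.40 0.10 0.20
#   2 0.70 0.55 0.30 0.25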
| 52
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    # The hidden states output of the model, of shape (batch_size, num_channels, sample_size)
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )
        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
def A_ ( self : List[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Union[torch.Tensor, float, int] , UpperCAmelCase : bool = True , ) -> Union[UNetaDOutput, Tuple]:
lowerCamelCase__ : Optional[Any] = timestep
if not torch.is_tensor(UpperCAmelCase ):
lowerCamelCase__ : Optional[int] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(UpperCAmelCase ) and len(timesteps.shape ) == 0:
lowerCamelCase__ : List[str] = timesteps[None].to(sample.device )
lowerCamelCase__ : Optional[int] = self.time_proj(UpperCAmelCase )
if self.config.use_timestep_embedding:
lowerCamelCase__ : str = self.time_mlp(UpperCAmelCase )
else:
lowerCamelCase__ : List[str] = timestep_embed[..., None]
lowerCamelCase__ : str = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowerCamelCase__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowerCamelCase__ : str = ()
for downsample_block in self.down_blocks:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = downsample_block(hidden_states=UpperCAmelCase , temb=UpperCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowerCamelCase__ : Optional[Any] = self.mid_block(UpperCAmelCase , UpperCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowerCamelCase__ : Dict = down_block_res_samples[-1:]
lowerCamelCase__ : Optional[Any] = down_block_res_samples[:-1]
lowerCamelCase__ : Any = upsample_block(UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , temb=UpperCAmelCase )
# 5. post-process
if self.out_block:
lowerCamelCase__ : Any = self.out_block(UpperCAmelCase , UpperCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=UpperCAmelCase )
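# A minimal usage sketch for the 1D UNet defined above, written against its
# public diffusers equivalent, UNet1DModel; channel counts and the sample
# length are illustrative (extra_in_channels=16 matches the Fourier time
# embedding that gets concatenated onto the input):
#
#     import torch
#     from diffusers import UNet1DModel
#
#     model = UNet1DModel(
#         in_channels=2, out_channels=2, extra_in_channels=16,
#         block_out_channels=(32, 32, 64),
#     )
#     sample = torch.randn(1, 2, 16384)        # (batch, channels, length)
#     out = model(sample, timestep=10).sample  # same shape as the input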
| 50
| 0
|
'''simple docstring'''
from __future__ import annotations
class Matrix:
    """
    Matrix object built from a list of rows, supporting the standard
    linear-algebra operations (determinant, inverse, arithmetic, powers).
    """

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.' )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns) )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ] )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ] )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(value) for value in self.rows[0]) + ".]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(value) for value in row]) + '.]'
                    for row in self.rows
                ] )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix' )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats' )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix' )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ] )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ] )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows] )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second' )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix' )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError('A Matrix can only be raised to the power of an int')
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power' )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
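# Example usage (a small sketch; the expected values were worked out by hand):
m = Matrix([[2, 3], [1, 2]])
print(m.determinant())         # 1
print(repr(m.inverse()))       # [[2, -3], [-1, 2]] (det is 1, so inverse == adjugate)
print(repr(m * m))             # [[7, 12], [4, 7]]
print(m ** 0 == m.identity())  # True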
| 53
|
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = F"""Open Knight Tour cannot be performed on a board of size {n}"""
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
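# Example: an open knight's tour on a 5x5 board (pure backtracking, so keep n
# small; the search is exponential in the worst case):
tour = open_knight_tour(5)
for row in tour:
    print(row)  # each square holds the move number on which it was visited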
| 50
| 0
|
"""simple docstring"""
import itertools
import math
def is_prime(number):
    """Return True if the given integer is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth=10_001):
    """Return the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F"{solution() = }")
| 54
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx: int) -> list:
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx: int, cnt: int) -> list:
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx: int) -> list:
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def final() -> list:
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/', 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu'))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_84,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
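# Example invocation (the script name and file paths are illustrative):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24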
| 50
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
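# With the subcommands registered above, typical shell usage looks like:
#   accelerate config                  # interactive configuration wizard
#   accelerate env                     # report the current environment
#   accelerate launch train_script.py  # run a script with the saved config
#   accelerate test                    # smoke-test the configuration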
| 55
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('test')
    else:
        parser = argparse.ArgumentParser('Accelerate test command')

    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"""--config_file={args.config_file} {script_name}"""

    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!')


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 50
| 0
|
'''simple docstring'''
def is_arithmetic_series(series) -> bool:
    """Return True if the input list is an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series) -> float:
    """Return the arithmetic mean of the input list."""
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
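# Example usage of the helpers above:
print(is_arithmetic_series([2, 4, 6]))      # True  (common difference 2)
print(is_arithmetic_series([2, 4, 8, 16]))  # False (geometric, not arithmetic)
print(arithmetic_mean([2, 4, 6]))           # 4.0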
| 56
|
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a

    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 50
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}
A : int = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    """BARThez tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __a , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a = None , **__a , ):
# Mask token behave like a normal word, i.e. include the space before it
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
__lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
__lowerCAmelCase = vocab_file
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
__lowerCAmelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
__lowerCAmelCase = len(self.sp_model ) - 1
__lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def snake_case ( self , __a , __a = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
__lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case ( self , __a , __a = None , __a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case ( self ):
return len(self.sp_model )
def snake_case ( self ):
__lowerCAmelCase = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case ( self , __a ):
return self.sp_model.encode(__a , out_type=__a )
def snake_case ( self , __a ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowerCAmelCase = self.sp_model.PieceToId(__a )
return spm_id if spm_id else self.unk_token_id
def snake_case ( self , __a ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__a )
def snake_case ( self , __a ):
__lowerCAmelCase = []
__lowerCAmelCase = ""
__lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
__lowerCAmelCase = True
__lowerCAmelCase = []
else:
current_sub_tokens.append(__a )
__lowerCAmelCase = False
out_string += self.sp_model.decode(__a )
return out_string.strip()
def __getstate__( self ):
__lowerCAmelCase = self.__dict__.copy()
__lowerCAmelCase = None
return state
def __setstate__( self , __a ):
__lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__lowerCAmelCase = {}
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self , __a , __a = None ):
if not os.path.isdir(__a ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__lowerCAmelCase = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
__lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
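# A minimal usage sketch of the tokenizer above (requires Hub access; the
# example sentence is illustrative):
#
#     tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     enc = tok("Transformers est génial !")
#     print(enc["input_ids"])  # wrapped in <s> ... </s> (ids 0 and 2)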
| 57
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/realm-cc-news-pretrained-embedder""": 512,
    """google/realm-cc-news-pretrained-encoder""": 512,
    """google/realm-cc-news-pretrained-scorer""": 512,
    """google/realm-cc-news-pretrained-openqa""": 512,
    """google/realm-orqa-nq-openqa""": 512,
    """google/realm-orqa-nq-reader""": 512,
    """google/realm-orqa-wq-openqa""": 512,
    """google/realm-orqa-wq-reader""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Tuple=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]="[UNK]" , UpperCAmelCase : Any="[SEP]" , UpperCAmelCase : Tuple="[PAD]" , UpperCAmelCase : List[Any]="[CLS]" , UpperCAmelCase : Union[str, Any]="[MASK]" , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Any=None , **UpperCAmelCase : Optional[int] , ) -> str:
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
lowerCamelCase__ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase ) != tokenize_chinese_chars
):
lowerCamelCase__ : Optional[int] = getattr(UpperCAmelCase , normalizer_state.pop('type' ) )
lowerCamelCase__ : Optional[Any] = do_lower_case
lowerCamelCase__ : str = strip_accents
lowerCamelCase__ : Optional[Any] = tokenize_chinese_chars
lowerCamelCase__ : int = normalizer_class(**UpperCAmelCase )
lowerCamelCase__ : str = do_lower_case
def A_ ( self : Optional[int] , UpperCAmelCase : int , **UpperCAmelCase : int ) -> List[Any]:
lowerCamelCase__ : List[Any] = PaddingStrategy.MAX_LENGTH
lowerCamelCase__ : Optional[int] = text
lowerCamelCase__ : Dict = kwargs.pop('text_pair' , UpperCAmelCase )
lowerCamelCase__ : List[Any] = kwargs.pop('return_tensors' , UpperCAmelCase )
lowerCamelCase__ : List[Any] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCAmelCase ):
if batch_text_pair is not None:
lowerCamelCase__ : Tuple = batch_text_pair[idx]
else:
lowerCamelCase__ : Dict = None
lowerCamelCase__ : Optional[int] = super().__call__(UpperCAmelCase , UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase__ : Any = encoded_candidates.get('input_ids' )
lowerCamelCase__ : Union[str, Any] = encoded_candidates.get('attention_mask' )
lowerCamelCase__ : Tuple = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCAmelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCAmelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCAmelCase )
lowerCamelCase__ : int = {key: item for key, item in output_data.items() if len(UpperCAmelCase ) != 0}
return BatchEncoding(UpperCAmelCase , tensor_type=UpperCAmelCase )
def A_ ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=None ) -> List[str]:
lowerCamelCase__ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A_ ( self : Tuple , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
lowerCamelCase__ : List[Any] = [self.sep_token_id]
lowerCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A_ ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
lowerCamelCase__ : int = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
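# Sketch of the candidate-batching method defined above
# (RealmTokenizerFast.batch_encode_candidates); requires Hub access:
#
#     tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#     text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#     batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
#     print(batch["input_ids"].shape)  # (batch_size, num_candidates, max_length)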
| 50
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class BloomConfig(PretrainedConfig):
    model_type = '''bloom'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_hidden_layers''': '''n_layer''',
        '''num_attention_heads''': '''n_head''',
    }
def __init__( self , A=25_0880 , A=64 , A=2 , A=8 , A=1e-5 , A=0.02 , A=True , A=1 , A=2 , A=False , A=0.0 , A=0.0 , A=1 , A=False , **A , ) -> Tuple:
_SCREAMING_SNAKE_CASE = vocab_size
# Backward compatibility with n_embed kwarg
_SCREAMING_SNAKE_CASE = kwargs.pop("""n_embed""" , A )
_SCREAMING_SNAKE_CASE = hidden_size if n_embed is None else n_embed
_SCREAMING_SNAKE_CASE = n_layer
_SCREAMING_SNAKE_CASE = n_head
_SCREAMING_SNAKE_CASE = layer_norm_epsilon
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = use_cache
_SCREAMING_SNAKE_CASE = pretraining_tp
_SCREAMING_SNAKE_CASE = apply_residual_connection_post_layernorm
_SCREAMING_SNAKE_CASE = hidden_dropout
_SCREAMING_SNAKE_CASE = attention_dropout
_SCREAMING_SNAKE_CASE = bos_token_id
_SCREAMING_SNAKE_CASE = eos_token_id
_SCREAMING_SNAKE_CASE = slow_but_exact
super().__init__(bos_token_id=A , eos_token_id=A , **A )
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse('''1.12''')
def __init__( self , A , A = "default" , A = None , A = False , ) -> str:
super().__init__(A , task=A , patching_specs=A , use_past=A )
if not getattr(self._config , """pad_token_id""" , A ):
# TODO: how to do that better?
_SCREAMING_SNAKE_CASE = 0
@property
def snake_case_( self ) -> Mapping[str, Mapping[int, str]]:
_SCREAMING_SNAKE_CASE = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(A , direction="""inputs""" , inverted_values_shape=A )
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_sequence + sequence"""}
else:
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def snake_case_( self ) -> int:
return self._config.n_layer
@property
def snake_case_( self ) -> int:
return self._config.n_head
@property
def snake_case_( self ) -> float:
return 1e-3
def snake_case_( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]:
_SCREAMING_SNAKE_CASE = super(A , self ).generate_dummy_inputs(
A , batch_size=A , seq_length=A , is_pair=A , framework=A )
# We need to order the input in the way they appears in the forward()
_SCREAMING_SNAKE_CASE = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_SCREAMING_SNAKE_CASE = seqlen + 2
_SCREAMING_SNAKE_CASE = self._config.hidden_size // self.num_attention_heads
_SCREAMING_SNAKE_CASE = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
_SCREAMING_SNAKE_CASE = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
_SCREAMING_SNAKE_CASE = [
(torch.zeros(A ), torch.zeros(A )) for _ in range(self.num_layers )
]
_SCREAMING_SNAKE_CASE = common_inputs["""attention_mask"""]
if self.use_past:
_SCREAMING_SNAKE_CASE = ordered_inputs["""attention_mask"""].dtype
_SCREAMING_SNAKE_CASE = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(A , A , dtype=A )] , dim=1 )
return ordered_inputs
@property
def snake_case_( self ) -> int:
return 13
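# Shape bookkeeping for the dummy past_key_values above (numbers are
# illustrative): with batch = 2, n_head = 8, hidden_size = 64 and seqlen = 5,
# head_dim = 64 // 8 = 8 and past_key_values_length = 5 + 2 = 7, so each layer
# receives a key of shape (2 * 8, 8, 7) and a value of shape (2 * 8, 7, 8);
# BLOOM keeps the sequence axis in position 2 for keys and position 1 for values.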
| 58
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
    features: List[InputFeatures]
def __init__( self : int , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : List[str]=False , UpperCAmelCase : bool = False , ) -> List[str]:
lowerCamelCase__ : int = hans_processors[task]()
lowerCamelCase__ : Optional[Any] = os.path.join(
UpperCAmelCase , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(UpperCAmelCase ) , UpperCAmelCase , ) , )
lowerCamelCase__ : int = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = label_list[2], label_list[1]
lowerCamelCase__ : List[str] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ : str = cached_features_file + '.lock'
with FileLock(UpperCAmelCase ):
if os.path.exists(UpperCAmelCase ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
lowerCamelCase__ : int = torch.load(UpperCAmelCase )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
lowerCamelCase__ : str = (
processor.get_dev_examples(UpperCAmelCase ) if evaluate else processor.get_train_examples(UpperCAmelCase )
)
logger.info('Training examples: %s' , len(UpperCAmelCase ) )
lowerCamelCase__ : Dict = hans_convert_examples_to_features(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
logger.info('Saving features into cached file %s' , UpperCAmelCase )
torch.save(self.features , UpperCAmelCase )
def __len__( self : Optional[int] ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : Tuple , UpperCAmelCase : Dict ) -> InputFeatures:
return self.features[i]
def A_ ( self : int ) -> int:
return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    features: List[InputFeatures]
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = 128 , UpperCAmelCase : Any=False , UpperCAmelCase : bool = False , ) -> Union[str, Any]:
lowerCamelCase__ : Any = hans_processors[task]()
lowerCamelCase__ : Optional[Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ : str = label_list[2], label_list[1]
lowerCamelCase__ : Optional[int] = label_list
lowerCamelCase__ : int = processor.get_dev_examples(UpperCAmelCase ) if evaluate else processor.get_train_examples(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = hans_convert_examples_to_features(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(UpperCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowerCamelCase__ : Optional[int] = tf.data.Dataset.from_generator(
UpperCAmelCase , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def A_ ( self : Any ) -> Any:
return self.dataset
def __len__( self : Tuple ) -> int:
return len(self.features )
def __getitem__( self : List[str] , UpperCAmelCase : Any ) -> InputFeatures:
return self.features[i]
def A_ ( self : Dict ) -> str:
return self.label_list
class HansProcessor(DataProcessor):
def A_ ( self : int , UpperCAmelCase : List[Any] ) -> int:
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase , 'heuristics_train_set.txt' ) ) , 'train' )
def A_ ( self : Any , UpperCAmelCase : int ) -> List[Any]:
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def A_ ( self : Any ) -> List[Any]:
return ["contradiction", "entailment", "neutral"]
def A_ ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> List[str]:
lowerCamelCase__ : List[str] = []
for i, line in enumerate(UpperCAmelCase ):
if i == 0:
continue
lowerCamelCase__ : Tuple = '%s-%s' % (set_type, line[0])
lowerCamelCase__ : str = line[5]
lowerCamelCase__ : Dict = line[6]
lowerCamelCase__ : int = line[7][2:] if line[7].startswith('ex' ) else line[7]
lowerCamelCase__ : Dict = line[0]
examples.append(InputExample(guid=UpperCAmelCase , text_a=UpperCAmelCase , text_b=UpperCAmelCase , label=UpperCAmelCase , pairID=UpperCAmelCase ) )
return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc='convert examples to features'):
        if ex_index % 10000 == 0:
            logger.info('Writing example %d' % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding='max_length',
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
hans_tasks_num_labels = {
    """hans""": 3,
}

hans_processors = {
    """hans""": HansProcessor,
}
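# Illustrative end-to-end call of the converter above (the tokenizer choice
# and the data path are assumptions):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     processor = HansProcessor()
#     examples = processor.get_dev_examples("./hans")  # reads heuristics_evaluation_set.txt
#     features = hans_convert_examples_to_features(
#         examples, processor.get_labels(), 128, tokenizer
#     )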
| 50
| 0
|
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data", )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file", )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file", )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w" ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
| 59
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : Optional[Any] = """▁"""
_UpperCAmelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def A_ ( self : List[Any] ) -> List[str]:
super().setUp()
lowerCamelCase__ : Dict = BertGenerationTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Optional[Any] ) -> Dict:
lowerCamelCase__ : List[str] = '<s>'
lowerCamelCase__ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def A_ ( self : List[str] ) -> Optional[int]:
lowerCamelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(UpperCAmelCase ) , 1002 )
def A_ ( self : List[Any] ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def A_ ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = BertGenerationTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
lowerCamelCase__ : List[str] = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
lowerCamelCase__ : List[str] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCamelCase__ : Optional[int] = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCamelCase__ : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def A_ ( self : Dict ) -> Tuple:
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def A_ ( self : Optional[int] ) -> List[str]:
lowerCamelCase__ : Union[str, Any] = 'Hello World!'
lowerCamelCase__ : Dict = [18536, 2260, 101]
self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
@slow
def A_ ( self : Optional[Any] ) -> str:
lowerCamelCase__ : List[Any] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
lowerCamelCase__ : Any = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
@require_torch
@slow
def A_ ( self : int ) -> Optional[Any]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCamelCase__ : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCamelCase__ : int = ' '.join(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = self.big_tokenizer.encode_plus(UpperCAmelCase , return_tensors='pt' , return_token_type_ids=UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=UpperCAmelCase )
lowerCamelCase__ : Tuple = BertGenerationConfig()
lowerCamelCase__ : Optional[Any] = BertGenerationEncoder(UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase )
model(**UpperCAmelCase )
@slow
def A_ ( self : Optional[int] ) -> List[Any]:
# fmt: off
lowerCamelCase__ : Any = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
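# Quick interactive check (added sketch, mirroring the values asserted in the
# slow test above):
#
#     tok = BertGenerationTokenizer.from_pretrained(
#         "google/bert_for_seq_generation_L-24_bbc_encoder"
#     )
#     tok.encode("Hello World!")  # -> [18536, 2260, 101]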
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _snake_case ( _snake_case : Optional[Any] , _snake_case : Any ):
lowerCAmelCase : List[str] = []
for part_id in partition_order:
lowerCAmelCase : Optional[int] = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(_snake_case ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _snake_case ( ):
lowerCAmelCase : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
lowerCAmelCase : Dict = spark.range(100 ).repartition(1 )
lowerCAmelCase : List[Any] = Spark(_snake_case )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _snake_case ( ):
lowerCAmelCase : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
lowerCAmelCase : List[str] = spark.range(10 ).repartition(2 )
lowerCAmelCase : List[Any] = [1, 0]
lowerCAmelCase : Union[str, Any] = _generate_iterable_examples(_snake_case , _snake_case ) # Reverse the partitions.
lowerCAmelCase : Dict = _get_expected_row_ids_and_row_dicts_for_partition_order(_snake_case , _snake_case )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowerCAmelCase, lowerCAmelCase : Any = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _snake_case ( ):
lowerCAmelCase : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
lowerCAmelCase : Any = spark.range(10 ).repartition(1 )
lowerCAmelCase : Optional[int] = SparkExamplesIterable(_snake_case )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_snake_case ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _snake_case ( ):
lowerCAmelCase : Union[str, Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
lowerCAmelCase : Union[str, Any] = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
generator_mock.shuffle = lambda x: x.reverse()
lowerCAmelCase : Union[str, Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(_snake_case , [2, 1, 0] )
lowerCAmelCase : Optional[Any] = SparkExamplesIterable(_snake_case ).shuffle_data_sources(_snake_case )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_snake_case ):
lowerCAmelCase, lowerCAmelCase : List[str] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _snake_case ( ):
lowerCAmelCase : Dict = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
lowerCAmelCase : int = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowerCAmelCase : List[str] = SparkExamplesIterable(_snake_case ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowerCAmelCase : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(_snake_case , [0, 2] )
for i, (row_id, row_dict) in enumerate(_snake_case ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowerCAmelCase : Tuple = SparkExamplesIterable(_snake_case ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowerCAmelCase : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(_snake_case , [1, 3] )
for i, (row_id, row_dict) in enumerate(_snake_case ):
lowerCAmelCase, lowerCAmelCase : Dict = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _snake_case ( ):
lowerCAmelCase : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
lowerCAmelCase : List[Any] = spark.range(100 ).repartition(1 )
lowerCAmelCase : List[str] = Spark(_snake_case )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
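# Worked numbers behind the two repartition tests above (added note, not in the
# source): each row holds one int64, so 100 rows occupy roughly 100 * 8 = 800
# bytes; max_shard_size=16 therefore yields 800 / 16 = 50 partitions, while
# max_shard_size=1 is capped at one row per partition, i.e. 100 partitions.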
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_UpperCAmelCase : str = pytest.mark.integration
@require_faiss
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : List[Any] ) -> Union[str, Any]:
lowerCamelCase__ : List[Any] = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def A_ ( self : Optional[Any] ) -> Optional[int]:
import faiss
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
lowerCamelCase__ : List[Any] = dset.map(
lambda UpperCAmelCase , UpperCAmelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=UpperCAmelCase , keep_in_memory=UpperCAmelCase )
lowerCamelCase__ : Tuple = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase__ , lowerCamelCase__ : str = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def A_ ( self : Union[str, Any] ) -> int:
import faiss
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : List[str] ) -> Tuple:
import faiss
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase__ , lowerCamelCase__ : str = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : Any ) -> Optional[Any]:
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(UpperCAmelCase , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def A_ ( self : Dict ) -> Dict:
from elasticsearch import Elasticsearch
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase__ : List[Any] = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCamelCase__ : int = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCamelCase__ : List[str] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=UpperCAmelCase )
lowerCamelCase__ , lowerCamelCase__ : Dict = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : Any ) -> Dict:
import faiss
lowerCamelCase__ : Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase__ : int = np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Any = 1
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = index.search(UpperCAmelCase )
self.assertRaises(UpperCAmelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase__ : str = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase__ , lowerCamelCase__ : Dict = index.search_batch(UpperCAmelCase )
self.assertRaises(UpperCAmelCase , index.search_batch , queries[0] )
lowerCamelCase__ : str = [scores[0] for scores in total_scores]
lowerCamelCase__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , UpperCAmelCase )
def A_ ( self : List[Any] ) -> List[Any]:
import faiss
lowerCamelCase__ : Any = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase__ : Tuple = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(UpperCAmelCase ):
lowerCamelCase__ : List[str] = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def A_ ( self : List[str] ) -> Optional[int]:
import faiss
lowerCamelCase__ : Optional[Any] = faiss.IndexFlat(5 )
lowerCamelCase__ : int = FaissIndex(custom_index=UpperCAmelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def A_ ( self : Any ) -> Optional[int]:
import faiss
lowerCamelCase__ : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase__ : List[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase__ : List[str] = np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ , lowerCamelCase__ : str = index.search(UpperCAmelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Any:
import faiss
lowerCamelCase__ : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase__ : Optional[int] = 'index.faiss'
lowerCamelCase__ : Optional[Any] = F"""mock://{index_name}"""
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCamelCase__ : Tuple = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCamelCase__ : Optional[int] = np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Dict = 1
lowerCamelCase__ , lowerCamelCase__ : str = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : Dict ) -> List[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase__ : Any = Elasticsearch()
lowerCamelCase__ : Tuple = {'acknowledged': True}
lowerCamelCase__ : Tuple = ElasticSearchIndex(es_client=UpperCAmelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCamelCase__ : Optional[int] = 'foo'
lowerCamelCase__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = index.search(UpperCAmelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase__ : Any = 'foo'
lowerCamelCase__ : List[str] = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase__ , lowerCamelCase__ : Tuple = index.search(UpperCAmelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase__ : List[str] = ['foo', 'bar', 'foobar']
lowerCamelCase__ : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase__ , lowerCamelCase__ : str = index.search_batch(UpperCAmelCase )
lowerCamelCase__ : List[str] = [scores[0] for scores in total_scores]
lowerCamelCase__ : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase )
# batched queries with timeout
lowerCamelCase__ : str = ['foo', 'bar', 'foobar']
lowerCamelCase__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = index.search_batch(UpperCAmelCase , request_timeout=30 )
lowerCamelCase__ : Optional[Any] = [scores[0] for scores in total_scores]
lowerCamelCase__ : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase )
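# Condensed standalone sketch of the FaissIndex round-trip the tests above
# exercise (added; assumes `faiss` and `datasets` are installed):
#
#     import faiss
#     import numpy as np
#     from datasets.search import FaissIndex
#
#     index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
#     index.add_vectors(np.eye(5, dtype=np.float32))
#     scores, ids = index.search(np.eye(5, dtype=np.float32)[1])  # best hit: row 1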
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = GPTSanJapaneseTokenizer
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : int = {"""do_clean_text""": False, """add_prefix_space""": False}
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCAmelCase_ : Optional[int] = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
# fmt: on
UpperCAmelCase_ : List[Any] = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
UpperCAmelCase_ : Dict = {"unk_token": "<unk>"}
UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file , "w" ) as emoji_writer:
emoji_writer.write(json.dumps(lowercase_ ) )
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = "こんにちは、世界。 \nこんばんは、㔺界。😀"
UpperCAmelCase_ : Union[str, Any] = "こんにちは、世界。 \nこんばんは、世界。😀"
return input_text, output_text
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.get_input_output_texts(lowercase_ )
UpperCAmelCase_ : Optional[int] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
UpperCAmelCase_ : Optional[int] = tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_ )
return text, ids
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase_ : Tuple = "こんにちは、世界。 こんばんは、㔺界。"
UpperCAmelCase_ : Dict = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
UpperCAmelCase_ : int = tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Testing conversion to ids without special tokens
UpperCAmelCase_ : int = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
UpperCAmelCase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Testing conversion to ids with special tokens
UpperCAmelCase_ : Tuple = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
UpperCAmelCase_ : int = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase_ : Optional[int] = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
UpperCAmelCase_ : Optional[int] = "こんにちは、、、、世界。こんばんは、、、、世界。"
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
UpperCAmelCase_ : List[Any] = "こんにちは、世界。"
UpperCAmelCase_ : List[Any] = "こんばんは、㔺界。😀"
UpperCAmelCase_ : List[Any] = "こんにちは、世界。こんばんは、世界。😀"
UpperCAmelCase_ : Optional[Any] = tokenizer.encode(prefix_text + input_text )
UpperCAmelCase_ : List[str] = tokenizer.encode("" , prefix_text=prefix_text + input_text )
UpperCAmelCase_ : str = tokenizer.encode(lowercase_ , prefix_text=lowercase_ )
UpperCAmelCase_ : List[Any] = tokenizer.decode(lowercase_ )
UpperCAmelCase_ : str = tokenizer.decode(lowercase_ )
UpperCAmelCase_ : List[str] = tokenizer.decode(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
UpperCAmelCase_ : Union[str, Any] = "こんにちは、世界。"
UpperCAmelCase_ : Union[str, Any] = "こんばんは、㔺界。😀"
UpperCAmelCase_ : List[Any] = len(tokenizer.encode(lowercase_ ) ) - 2
UpperCAmelCase_ : Dict = len(tokenizer.encode(lowercase_ ) ) - 2
UpperCAmelCase_ : Union[str, Any] = [1] + [0] * (len_prefix + len_text + 1)
UpperCAmelCase_ : Any = [1] * (len_prefix + len_text + 1) + [0]
UpperCAmelCase_ : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
UpperCAmelCase_ : Dict = tokenizer(prefix_text + input_text ).token_type_ids
UpperCAmelCase_ : Optional[Any] = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
UpperCAmelCase_ : str = tokenizer(lowercase_ , prefix_text=lowercase_ ).token_type_ids
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
UpperCAmelCase_ : str = tokenizer.encode("あンいワ" )
UpperCAmelCase_ : List[Any] = tokenizer.encode("" , prefix_text="あンいワ" )
UpperCAmelCase_ : str = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(lowercase_ ) , tokenizer.decode(lowercase_ ) )
self.assertEqual(tokenizer.decode(lowercase_ ) , tokenizer.decode(lowercase_ ) )
self.assertNotEqual(lowercase_ , lowercase_ )
self.assertNotEqual(lowercase_ , lowercase_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
UpperCAmelCase_ : Tuple = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
UpperCAmelCase_ : Dict = tokenizer(lowercase_ , padding=lowercase_ )
UpperCAmelCase_ : int = tokenizer.batch_encode_plus(lowercase_ , padding=lowercase_ )
# fmt: off
UpperCAmelCase_ : str = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
UpperCAmelCase_ : Optional[int] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
UpperCAmelCase_ : int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , lowercase_ )
self.assertListEqual(x_token.token_type_ids , lowercase_ )
self.assertListEqual(x_token.attention_mask , lowercase_ )
self.assertListEqual(x_token_a.input_ids , lowercase_ )
self.assertListEqual(x_token_a.token_type_ids , lowercase_ )
self.assertListEqual(x_token_a.attention_mask , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
# tokenizer has no padding token
pass
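# Added sketch of the prefix/input split the tests above exercise (assumes the
# Tanrei/GPTSAN-japanese checkpoint is available):
#
#     tok = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
#     enc = tok("いワ", prefix_text="あン")
#     # enc.token_type_ids mark the prefix segment with 1s, as asserted above.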
def is_sum_subset(arr: list, required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # For each prefix of arr, a sum of zero can be formed by not taking any
    # element, hence True.
    for i in range(arr_len + 1):
        subset[i][0] = True

    # A non-zero sum cannot be formed from the empty set, hence False.
    for j in range(1, required_sum + 1):
        subset[0][j] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
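# Minimal usage sketch (added, not in the source): the DP table answers the
# classic subset-sum query.
assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True  # 4 + 5 == 9
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False  # no subset reaches 30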
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a depth-first topological sort over the module-level graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
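# Added sanity check (a sketch, not in the source): children are appended
# before their parents, so for the graph above the root "a" comes last.
assert topological_sort("a", [], []) == ["c", "d", "e", "b", "a"]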
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mask-weighted mean pooling over the token embeddings before projecting.
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
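if __name__ == "__main__":
    # Minimal smoke test (a sketch under assumed tiny hyper-parameters, not from
    # the source): transformerDimSize must equal the XLM-R hidden_size so the
    # pooled embeddings fit the linear projection.
    cfg = MCLIPConfig(
        transformerDimSize=32,
        imageDimSize=16,
        hidden_size=32,
        num_hidden_layers=1,
        num_attention_heads=2,
        intermediate_size=64,
        vocab_size=100,
        max_position_embeddings=64,
    )
    clip = MultilingualCLIP(cfg)
    text_embs, token_embs = clip(torch.randint(0, 100, (2, 5)), torch.ones(2, 5))
    print(text_embs.shape)  # torch.Size([2, 16])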
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
@require_torch
def UpperCamelCase__ ( self : str ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
_a = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
_a = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
_a = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__a )
BertModel.from_pretrained(__a )
BertTokenizer.from_pretrained(__a )
pipeline(task="fill-mask" , model=__a )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
_a = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = "1"
_a = subprocess.run(__a , env=__a , check=__a , capture_output=__a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def UpperCamelCase__ ( self : Optional[Any] ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
_a = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
_a = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
_a = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__a )
BertModel.from_pretrained(__a )
BertTokenizer.from_pretrained(__a )
pipeline(task="fill-mask" , model=__a )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__a , env=__a , check=__a , capture_output=__a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def UpperCamelCase__ ( self : List[Any] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
_a = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
_a = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
_a = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__a , env=__a , check=__a , capture_output=__a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
_a = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = "1"
_a = subprocess.run(__a , env=__a , check=__a , capture_output=__a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def UpperCamelCase__ ( self : Optional[Any] ):
_a = "\nfrom transformers import pipeline\n "
_a = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
_a = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
_a = self.get_env()
_a = "1"
_a = [sys.executable, "-c", "\n".join([load, mock, run] )]
_a = subprocess.run(__a , env=__a , check=__a , capture_output=__a )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def UpperCamelCase__ ( self : str ):
_a = "\nfrom transformers import AutoModel\n "
_a = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
_a = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__a , env=__a , check=__a , capture_output=__a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = "1"
_a = subprocess.run(__a , env=__a , check=__a , capture_output=__a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: return the least row length n at which the number of
    ways to fill the row (the fill-count) first exceeds one million."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
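# Hedged cross-checks (added, taken from the Project Euler 115 statement): for
# a minimum block length of 3 the fill-count first exceeds one million at
# n = 30, and for length 10 at n = 57.
#
#     >>> solution(3)
#     30
#     >>> solution(10)
#     57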
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first search that walks only unvisited edges and records the path."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Return 1 for an Euler circuit, 2 for an Euler path (with its odd-degree
    start node), and 3 when the graph has neither."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
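# A minimal extra example (added sketch): a triangle has all even degrees, so
# `check_euler` reports an Euler cycle and prints the closed walk.
#
#     >>> check_euler({1: [2, 3], 2: [1, 3], 3: [1, 2]}, 10)
#     graph has a Euler cycle
#     [1, 2, 3, 1]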
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list, current_subsequence: list, index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    # Branch 1: skip the element at `index`.
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: take it, recurse, then backtrack.
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
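# Added usage note (sketch): for [1, 2] the state-space tree prints the four
# subsequences in skip-first order: [], [2], [1], [1, 2].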
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: maximise the value packed into capacity `w`
    from `n` items with values `vl` and weights `wt`."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
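# Worked example (added sketch): with values [60, 100, 120], weights
# [10, 20, 30] and capacity 50, the greedy takes the first two items whole and
# 20/30 of the third: 60 + 100 + 80 = 240.
assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0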
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_UpperCAmelCase : int = None
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Optional[int] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : List[Any] = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
_UpperCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = ["""input_ids""", """attention_mask"""]
UpperCAmelCase__ = NllbTokenizer
UpperCAmelCase__ = []
UpperCAmelCase__ = []
def __init__( self : Tuple , UpperCAmelCase : int=None , UpperCAmelCase : Any=None , UpperCAmelCase : str="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : str="</s>" , UpperCAmelCase : Tuple="<s>" , UpperCAmelCase : Optional[Any]="<unk>" , UpperCAmelCase : List[str]="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : Tuple=None , UpperCAmelCase : int=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Any=False , **UpperCAmelCase : Optional[int] , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ : List[Any] = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
lowerCamelCase__ : Union[str, Any] = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase , tokenizer_file=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , src_lang=UpperCAmelCase , tgt_lang=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , legacy_behaviour=UpperCAmelCase , **UpperCAmelCase , )
lowerCamelCase__ : List[Any] = vocab_file
lowerCamelCase__ : Dict = False if not self.vocab_file else True
lowerCamelCase__ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ : str = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ : int = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ : List[Any] = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A_ ( self : int ) -> str:
return self._src_lang
@src_lang.setter
def A_ ( self : List[Any] , UpperCAmelCase : str ) -> None:
lowerCamelCase__ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A_ ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A_ ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
lowerCamelCase__ : Dict = [self.sep_token_id]
lowerCamelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A_ ( self : int , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Optional[str] , UpperCAmelCase : Optional[str] , **UpperCAmelCase : List[str] ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ : Optional[int] = src_lang
lowerCamelCase__ : Optional[int] = self(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = tgt_lang_id
return inputs
def A_ ( self : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : str = "eng_Latn" , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "fra_Latn" , **UpperCAmelCase : Dict , ) -> BatchEncoding:
lowerCamelCase__ : Any = src_lang
lowerCamelCase__ : int = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Union[str, Any] ) -> Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def A_ ( self : Any ) -> Union[str, Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A_ ( self : str , UpperCAmelCase : Optional[Any] ) -> None:
lowerCamelCase__ : int = self.convert_tokens_to_ids(UpperCAmelCase )
if self.legacy_behaviour:
lowerCamelCase__ : int = []
lowerCamelCase__ : str = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ : int = [self.cur_lang_code]
lowerCamelCase__ : Tuple = [self.eos_token_id]
lowerCamelCase__ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A_ ( self : int , UpperCAmelCase : str ) -> None:
lowerCamelCase__ : Union[str, Any] = self.convert_tokens_to_ids(UpperCAmelCase )
if self.legacy_behaviour:
lowerCamelCase__ : Dict = []
lowerCamelCase__ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ : Any = [self.cur_lang_code]
lowerCamelCase__ : Optional[Any] = [self.eos_token_id]
lowerCamelCase__ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A_ ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
lowerCamelCase__ : int = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
return (out_vocab_file,)
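# Typical translation round-trip (added sketch; assumes this class corresponds
# to NllbTokenizerFast and that the checkpoint named above is available):
#
#     tok = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tok("Hello world", text_target="Bonjour le monde", return_tensors="pt")
#     # input_ids start with the eng_Latn code (or end with it in legacy mode).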
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
_A : Union[str, Any] = MODEL_FOR_MASKED_LM_MAPPING
_A : int = TF_MODEL_FOR_MASKED_LM_MAPPING
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def lowerCAmelCase_ ( self: str ) -> Union[str, Any]:
snake_case_ :Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
snake_case_ :str = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""},
] , )
snake_case_ :List[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-05,
"""token""": 38_015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-05,
"""token""": 25_506,
"""token_str""": """ accuser""",
},
] , )
snake_case_ :Any = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
snake_case_ :List[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
snake_case_ :Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""},
] , )
snake_case_ :Union[str, Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""},
] , )
snake_case_ :str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""},
] , )
snake_case_ :List[Any] = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
[
{
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def lowerCAmelCase_ ( self: Any ) -> str:
snake_case_ :Optional[int] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
snake_case_ :List[str] = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(snake_case , snake_case )
@slow
@require_torch
def lowerCAmelCase_ ( self: Dict ) -> Dict:
snake_case_ :Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(snake_case )
@slow
@require_tf
def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]:
snake_case_ :Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(snake_case )
def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: List[Any] ) -> Union[str, Any]:
snake_case_ :Optional[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(snake_case ) , [
{"""sequence""": """My name is John""", """score""": 0.0_0_8, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.0_0_7, """token""": 1_573, """token_str""": """ Chris"""},
] , )
snake_case_ :List[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(snake_case ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.2_5_1,
"""token""": 2_201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.2_1_4,
"""token""": 12_790,
"""token_str""": """ Lyon""",
},
] , )
snake_case_ :int = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.0_0_5, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.0_0_0, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.0_0_0, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
def lowerCAmelCase_ ( self: Dict ) -> Optional[int]:
snake_case_ :str = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
snake_case_ :Any = None
snake_case_ :Tuple = None
self.run_pipeline_test(snake_case , [] )
@require_tf
def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]:
snake_case_ :int = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
snake_case_ :List[str] = None
snake_case_ :List[Any] = None
self.run_pipeline_test(snake_case , [] )
def lowerCAmelCase_ ( self: List[Any] , snake_case: int , snake_case: Tuple , snake_case: Optional[int] ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token (probably reformer or wav2vec2)""" )
snake_case_ :Union[str, Any] = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
snake_case_ :str = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def lowerCAmelCase_ ( self: Any , snake_case: Optional[Any] , snake_case: Tuple ) -> Union[str, Any]:
snake_case_ :Any = fill_masker.tokenizer
snake_case_ :List[Any] = fill_masker.model
snake_case_ :int = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
snake_case , [
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
] , )
snake_case_ :Optional[int] = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
snake_case , [
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
] , )
snake_case_ :Union[str, Any] = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
snake_case , [
[
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
],
[
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
],
] , )
with self.assertRaises(snake_case ):
fill_masker([None] )
# Input without a mask token is not supported
with self.assertRaises(snake_case ):
fill_masker("""This is""" )
self.run_test_top_k(snake_case , snake_case )
self.run_test_targets(snake_case , snake_case )
self.run_test_top_k_targets(snake_case , snake_case )
self.fill_mask_with_duplicate_targets_and_top_k(snake_case , snake_case )
self.fill_mask_with_multiple_masks(snake_case , snake_case )
def lowerCAmelCase_ ( self: Any , snake_case: int , snake_case: int ) -> int:
snake_case_ :List[str] = tokenizer.get_vocab()
snake_case_ :Dict = sorted(vocab.keys() )[:2]
# Pipeline argument
snake_case_ :Any = FillMaskPipeline(model=snake_case , tokenizer=snake_case , targets=snake_case )
snake_case_ :Optional[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
snake_case , [
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
] , )
snake_case_ :List[str] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , snake_case )
snake_case_ :Optional[int] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(snake_case ) )
# Call argument
snake_case_ :int = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
snake_case_ :Optional[int] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case )
self.assertEqual(
snake_case , [
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
] , )
snake_case_ :Tuple = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , snake_case )
snake_case_ :Tuple = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(snake_case ) )
# Score equivalence
snake_case_ :Tuple = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case )
snake_case_ :Any = [top_mask["""token_str"""] for top_mask in outputs]
snake_case_ :Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case ) == set(snake_case ):
snake_case_ :Optional[int] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case )
snake_case_ :Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) )
# Raises with invalid targets
with self.assertRaises(snake_case ):
snake_case_ :Union[str, Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(snake_case ):
snake_case_ :int = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[""""""] )
with self.assertRaises(snake_case ):
snake_case_ :Optional[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets="""""" )
def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: List[str] ) -> Union[str, Any]:
snake_case_ :Union[str, Any] = FillMaskPipeline(model=snake_case , tokenizer=snake_case , top_k=2 )
snake_case_ :str = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
snake_case , [
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
] , )
snake_case_ :Optional[int] = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
snake_case_ :Optional[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
snake_case , [
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
] , )
self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) )
def lowerCAmelCase_ ( self: Any , snake_case: List[str] , snake_case: List[Any] ) -> Tuple:
snake_case_ :Tuple = tokenizer.get_vocab()
snake_case_ :List[str] = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
# top_k=2, ntargets=3
snake_case_ :List[Any] = sorted(vocab.keys() )[:3]
snake_case_ :Any = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=snake_case )
# If we use the most probable targets and filter differently, we should still
# have the same results
snake_case_ :str = [el["""token_str"""] for el in sorted(snake_case , key=lambda x : x["score"] , reverse=snake_case )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case ).issubset(snake_case ):
snake_case_ :Dict = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=snake_case )
# They should yield exactly the same result
self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) )
def lowerCAmelCase_ ( self: Tuple , snake_case: Optional[int] , snake_case: Dict ) -> Tuple:
snake_case_ :List[str] = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
snake_case_ :Dict = tokenizer.get_vocab()
# String duplicates + id duplicates
snake_case_ :Optional[Any] = sorted(vocab.keys() )[:3]
snake_case_ :Union[str, Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
snake_case_ :str = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=snake_case , top_k=10 )
# The target list contains duplicates, so we can't output more results
# than the number of unique targets
self.assertEqual(len(snake_case ) , 3 )
def lowerCAmelCase_ ( self: str , snake_case: Optional[int] , snake_case: Dict ) -> int:
snake_case_ :Union[str, Any] = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
snake_case_ :Dict = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
snake_case , [
[
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
],
[
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
],
[
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
],
] , )
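# --- Illustrative aside (not part of the test file above) ---------------------
# A minimal, hedged sketch of the fill-mask call patterns these tests exercise;
# the model name is the same tiny checkpoint the tests use, and the printed
# scores/tokens are whatever that checkpoint happens to produce.
from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)

# Single mask: a list of dicts with sequence / score / token / token_str.
print(unmasker("My name is <mask>"))

# Restrict candidates to explicit targets and widen top_k.
print(unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3))

# Two masks: one ranked list per mask position.
print(unmasker("My name is <mask> <mask>", top_k=2))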
| 66
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Optional[int] = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : List[str] = emb.weight.shape
lowerCamelCase__ : Tuple = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
lowerCamelCase__ : Dict = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Tuple = torch.load(_UpperCAmelCase , map_location='cpu' )
lowerCamelCase__ : List[str] = mam_aaa['args'] or mam_aaa['cfg']['model']
lowerCamelCase__ : Optional[int] = mam_aaa['model']
remove_ignore_keys_(_UpperCAmelCase )
lowerCamelCase__ : str = state_dict['encoder.embed_tokens.weight'].shape[0]
lowerCamelCase__ : Union[str, Any] = MaMaaaConfig(
vocab_size=_UpperCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
lowerCamelCase__ : Optional[Any] = state_dict['decoder.embed_tokens.weight']
lowerCamelCase__ : Union[str, Any] = MaMaaaForConditionalGeneration(_UpperCAmelCase )
model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
lowerCamelCase__ : List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_UpperCAmelCase : str = parser.parse_args()
_UpperCAmelCase : Optional[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
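# --- Illustrative aside -------------------------------------------------------
# Hedged sketch of what make_linear_from_emb above does: tie an output
# projection to an embedding matrix so that logits = hidden_states @ emb.T.
# The sizes below are toy values chosen for illustration.
import torch
from torch import nn

emb = nn.Embedding(10, 4)                      # vocab_size=10, d_model=4
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
lin_layer.weight.data = emb.weight.data        # share the same tensor
assert torch.equal(lin_layer.weight, emb.weight)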
| 50
| 0
|
'''simple docstring'''
import math
import sys
def __lowerCAmelCase ( UpperCamelCase__ ) -> str:
__lowerCamelCase = ''''''
try:
with open(UpperCamelCase__ , '''rb''' ) as binary_file:
__lowerCamelCase = binary_file.read()
for dat in data:
__lowerCamelCase = f"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def __lowerCAmelCase ( UpperCamelCase__ ) -> str:
__lowerCamelCase = {'''0''': '''0''', '''1''': '''1'''}
__lowerCamelCase , __lowerCamelCase = '''''', ''''''
__lowerCamelCase = len(UpperCamelCase__ )
for i in range(len(UpperCamelCase__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__lowerCamelCase = lexicon[curr_string]
result += last_match_id
__lowerCamelCase = last_match_id + '''0'''
if math.loga(UpperCamelCase__ ).is_integer():
__lowerCamelCase = {}
for curr_key in list(UpperCamelCase__ ):
__lowerCamelCase = lexicon.pop(UpperCamelCase__ )
__lowerCamelCase = new_lex
__lowerCamelCase = last_match_id + '''1'''
index += 1
__lowerCamelCase = ''''''
return result
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> None:
__lowerCamelCase = 8
try:
with open(UpperCamelCase__ , '''wb''' ) as opened_file:
__lowerCamelCase = [
to_write[i : i + byte_length]
for i in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(UpperCamelCase__ , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def __lowerCAmelCase ( UpperCamelCase__ ) -> str:
__lowerCamelCase = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
__lowerCamelCase = data_bits[counter:]
__lowerCamelCase = data_bits[counter + 1 :]
return data_bits
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> None:
__lowerCamelCase = read_file_binary(UpperCamelCase__ )
__lowerCamelCase = remove_prefix(UpperCamelCase__ )
__lowerCamelCase = decompress_data(UpperCamelCase__ )
write_file_binary(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
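# --- Illustrative aside -------------------------------------------------------
# Minimal sketch of the byte-to-bit-string step that read_file_binary above
# performs for every byte of the input file.
data = b"AB"
bits = "".join(f"{byte:08b}" for byte in data)
print(bits)  # 0100000101000010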
| 67
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
lowerCamelCase__ : str = set()
lowerCamelCase__ : Any = []
def parse_line(_UpperCAmelCase ):
for line in fp:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase__ : Any = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(_UpperCAmelCase ) > 0:
lowerCamelCase__ : str = '\n'.join(_UpperCAmelCase )
# Only keep the warnings specified in `targets`
if any(F""": {x}: """ in warning for x in targets ):
selected_warnings.add(_UpperCAmelCase )
buffer.clear()
continue
else:
lowerCamelCase__ : List[str] = line.strip()
buffer.append(_UpperCAmelCase )
if from_gh:
for filename in os.listdir(_UpperCAmelCase ):
lowerCamelCase__ : Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not os.path.isdir(_UpperCAmelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(_UpperCAmelCase ) as fp:
parse_line(_UpperCAmelCase )
else:
try:
with zipfile.ZipFile(_UpperCAmelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(_UpperCAmelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(_UpperCAmelCase ) as fp:
parse_line(_UpperCAmelCase )
except Exception:
logger.warning(
F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Tuple = set()
lowerCamelCase__ : Optional[int] = [os.path.join(_UpperCAmelCase , _UpperCAmelCase ) for p in os.listdir(_UpperCAmelCase ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(_UpperCAmelCase , _UpperCAmelCase ) )
return selected_warnings
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Tuple:
return values.split(',' )
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
_UpperCAmelCase : Dict = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
_UpperCAmelCase : Union[str, Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
_UpperCAmelCase : Dict = extract_warnings(args.output_dir, args.targets)
_UpperCAmelCase : Optional[Any] = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
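# --- Illustrative aside -------------------------------------------------------
# Hedged sketch of the target filter used in parse_line above: a buffered
# warning block is kept only when one of the requested categories appears in it.
targets = ["DeprecationWarning", "UserWarning", "FutureWarning"]
warning = "src/foo.py:12: DeprecationWarning: bar is deprecated"
print(any(f": {x}: " in warning for x in targets))  # True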
| 50
| 0
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
raise RuntimeError("CUDA out of memory." )
class a__ ( nn.Module ):
"""simple docstring"""
def __init__( self ) -> Dict:
'''simple docstring'''
super().__init__()
A__ = nn.Linear(3 , 4 )
A__ = nn.BatchNormad(4 )
A__ = nn.Linear(4 , 5 )
def UpperCamelCase ( self , lowercase ) -> int:
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(lowercase ) ) )
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
A__ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase ):
nonlocal batch_sizes
batch_sizes.append(lowercase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowercase , [128, 64, 32, 16, 8] )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase , lowercase ):
nonlocal batch_sizes
batch_sizes.append(lowercase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
A__ , A__ = mock_training_loop_function("hello" )
self.assertListEqual(lowercase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, "hello"] )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowercase ):
pass
with self.assertRaises(lowercase ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowercase ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase , lowercase , lowercase ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(lowercase ) as cm:
mock_training_loop_function(128 , "hello" , "world" )
self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] )
self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase ):
raise ValueError("Oops, we had an error!" )
with self.assertRaises(lowercase ) as cm:
mock_training_loop_function()
self.assertIn("Oops, we had an error!" , cm.exception.args[0] )
@require_cuda
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = torch.cuda.memory_allocated()
A__ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowercase )
A__ = release_memory(lowercase )
self.assertEqual(torch.cuda.memory_allocated() , lowercase )
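# --- Illustrative aside -------------------------------------------------------
# Hedged sketch of the halving behaviour these tests exercise: on a simulated
# CUDA OOM the decorator retries the wrapped function with half the batch size.
from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=32)
def train(batch_size):
    if batch_size > 8:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(train())  # 8, after failing at 32 and 16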
| 68
|
import flax.linen as nn
import jax
import jax.numpy as jnp
class lowerCAmelCase ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = jnp.floataa
def A_ ( self : Any ) -> Any:
lowerCamelCase__ : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : int , UpperCAmelCase : Dict ) -> Optional[Any]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = hidden_states.shape
lowerCamelCase__ : Union[str, Any] = jax.image.resize(
UpperCAmelCase , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
lowerCamelCase__ : Optional[Any] = self.conv(UpperCAmelCase )
return hidden_states
class lowerCAmelCase ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = jnp.floataa
def A_ ( self : List[str] ) -> int:
lowerCamelCase__ : Tuple = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : str , UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
lowerCamelCase__ : Optional[Any] = self.conv(UpperCAmelCase )
return hidden_states
class lowerCAmelCase ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = None
UpperCAmelCase__ = 0.0
UpperCAmelCase__ = None
UpperCAmelCase__ = jnp.floataa
def A_ ( self : List[str] ) -> Union[str, Any]:
lowerCamelCase__ : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels
lowerCamelCase__ : Tuple = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCamelCase__ : int = nn.Conv(
UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCamelCase__ : Union[str, Any] = nn.Dense(UpperCAmelCase , dtype=self.dtype )
lowerCamelCase__ : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCamelCase__ : List[Any] = nn.Dropout(self.dropout_prob )
lowerCamelCase__ : Tuple = nn.Conv(
UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCamelCase__ : Optional[Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
lowerCamelCase__ : Union[str, Any] = None
if use_nin_shortcut:
lowerCamelCase__ : Dict = nn.Conv(
UpperCAmelCase , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=True ) -> Optional[int]:
lowerCamelCase__ : Union[str, Any] = hidden_states
lowerCamelCase__ : List[Any] = self.norma(UpperCAmelCase )
lowerCamelCase__ : List[Any] = nn.swish(UpperCAmelCase )
lowerCamelCase__ : Any = self.conva(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.time_emb_proj(nn.swish(UpperCAmelCase ) )
lowerCamelCase__ : List[str] = jnp.expand_dims(jnp.expand_dims(UpperCAmelCase , 1 ) , 1 )
lowerCamelCase__ : List[str] = hidden_states + temb
lowerCamelCase__ : Optional[Any] = self.norma(UpperCAmelCase )
lowerCamelCase__ : List[str] = nn.swish(UpperCAmelCase )
lowerCamelCase__ : Optional[int] = self.dropout(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : str = self.conva(UpperCAmelCase )
if self.conv_shortcut is not None:
lowerCamelCase__ : Dict = self.conv_shortcut(UpperCAmelCase )
return hidden_states + residual
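# --- Illustrative aside -------------------------------------------------------
# Minimal sketch of the nearest-neighbour upsample used by the first module
# above: jax.image.resize doubles the spatial dims while channels stay fixed.
import jax
import jax.numpy as jnp

x = jnp.ones((1, 8, 8, 3))                                    # NHWC layout
y = jax.image.resize(x, shape=(1, 16, 16, 3), method="nearest")
print(y.shape)  # (1, 16, 16, 3)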
| 50
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "gpt_bigcode"
SCREAMING_SNAKE_CASE_ = ["past_key_values"]
SCREAMING_SNAKE_CASE_ = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, lowerCAmelCase__=5_0257, lowerCAmelCase__=1024, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=None, lowerCAmelCase__="gelu_pytorch_tanh", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=1e-5, lowerCAmelCase__=0.02, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Any:
snake_case_ = vocab_size
snake_case_ = n_positions
snake_case_ = n_embd
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = n_inner
snake_case_ = activation_function
snake_case_ = resid_pdrop
snake_case_ = embd_pdrop
snake_case_ = attn_pdrop
snake_case_ = layer_norm_epsilon
snake_case_ = initializer_range
snake_case_ = scale_attn_weights
snake_case_ = use_cache
snake_case_ = attention_softmax_in_fpaa
snake_case_ = scale_attention_softmax_in_fpaa
snake_case_ = multi_query
snake_case_ = bos_token_id
snake_case_ = eos_token_id
super().__init__(bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
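# --- Illustrative aside -------------------------------------------------------
# Hedged usage sketch for a config like the one above; GPTBigCodeConfig is the
# released transformers class this file mirrors, and the values are the
# defaults shown in the signature.
from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(vocab_size=50257, n_positions=1024, n_embd=768)
print(config.hidden_size)  # 768, resolved through the attribute_map alias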
| 69
|
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> set:
lowerCamelCase__ : Optional[Any] = set()
# edges = list of graph's edges
lowerCamelCase__ : List[str] = get_edges(_UpperCAmelCase )
# While there are still edges left, take an arbitrary edge
# (from_node, to_node), add both of its endpoints to chosen_vertices, and then
# remove all edges adjacent to from_node and to_node
while edges:
lowerCamelCase__ , lowerCamelCase__ : str = edges.pop()
chosen_vertices.add(_UpperCAmelCase )
chosen_vertices.add(_UpperCAmelCase )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(_UpperCAmelCase )
return chosen_vertices
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> set:
lowerCamelCase__ : Union[str, Any] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 50
| 0
|
'''simple docstring'''
import math
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase = 0 , lowerCAmelCase = 0 ):
"""simple docstring"""
_lowerCAmelCase = end or len(lowerCAmelCase )
for i in range(lowerCAmelCase , lowerCAmelCase ):
_lowerCAmelCase = i
_lowerCAmelCase = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_lowerCAmelCase = array[temp_index - 1]
temp_index -= 1
_lowerCAmelCase = temp_index_value
return array
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): # Max Heap
"""simple docstring"""
_lowerCAmelCase = index
_lowerCAmelCase = 2 * index + 1 # Left Node
_lowerCAmelCase = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_lowerCAmelCase = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_lowerCAmelCase = right_index
if largest != index:
_lowerCAmelCase , _lowerCAmelCase = array[largest], array[index]
heapify(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = len(lowerCAmelCase )
for i in range(n // 2 , -1 , -1 ):
heapify(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
for i in range(n - 1 , 0 , -1 ):
_lowerCAmelCase , _lowerCAmelCase = array[0], array[i]
heapify(lowerCAmelCase , 0 , lowerCAmelCase )
return array
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = low
_lowerCAmelCase = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_lowerCAmelCase , _lowerCAmelCase = array[j], array[i]
i += 1
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
if len(lowerCAmelCase ) == 0:
return array
_lowerCAmelCase = 2 * math.ceil(math.loga(len(lowerCAmelCase ) ) )
_lowerCAmelCase = 16
return intro_sort(lowerCAmelCase , 0 , len(lowerCAmelCase ) , lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(lowerCAmelCase )
max_depth -= 1
_lowerCAmelCase = median_of_a(lowerCAmelCase , lowerCAmelCase , start + ((end - start) // 2) + 1 , end - 1 )
_lowerCAmelCase = partition(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
intro_sort(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
_lowerCAmelCase = p
return insertion_sort(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
A__ : Dict =input('''Enter numbers separated by a comma : ''').strip()
A__ : Dict =[float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
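# --- Illustrative aside -------------------------------------------------------
# Hedged, self-contained sketch of the median-of-three pivot rule used above
# (the XOR-style comparison of the two inequalities picks the middle value).
def median_of_3(a, lo, mid, hi):
    if (a[lo] > a[mid]) != (a[lo] > a[hi]):
        return a[lo]
    if (a[mid] > a[lo]) != (a[mid] > a[hi]):
        return a[mid]
    return a[hi]

print(median_of_3([7, 1, 4], 0, 1, 2))  # 4, the median of 7, 1 and 4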
| 70
|
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
_UpperCAmelCase : Any = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> list[int]:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
lowerCamelCase__ : int = []
for num in range(len(_UpperCAmelCase ) ):
lowerCamelCase__ : Union[str, Any] = 0
while 2 * i * i <= odd_composites[num]:
lowerCamelCase__ : Dict = odd_composites[num] - 2 * i * i
if is_prime(_UpperCAmelCase ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(_UpperCAmelCase ) == n:
return list_nums
return []
def SCREAMING_SNAKE_CASE ( ) -> int:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 50
| 0
|
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def A ( a_ ,a_ ) -> Union[str, Any]:
__UpperCamelCase : List[Any] =k_size // 2
__UpperCamelCase , __UpperCamelCase : Tuple =mgrid[0 - center : k_size - center, 0 - center : k_size - center]
__UpperCamelCase : List[str] =1 / (2 * pi * sigma) * exp(-(square(a_ ) + square(a_ )) / (2 * square(a_ )) )
return g
def A ( a_ ,a_ ,a_ ) -> Any:
__UpperCamelCase , __UpperCamelCase : Optional[Any] =image.shape[0], image.shape[1]
# dst image height and width
__UpperCamelCase : List[str] =height - k_size + 1
__UpperCamelCase : Dict =width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
__UpperCamelCase : Tuple =zeros((dst_height * dst_width, k_size * k_size) )
__UpperCamelCase : Optional[Any] =0
for i, j in product(range(a_ ) ,range(a_ ) ):
__UpperCamelCase : Union[str, Any] =ravel(image[i : i + k_size, j : j + k_size] )
__UpperCamelCase : List[Any] =window
row += 1
# turn the kernel into shape(k*k, 1)
__UpperCamelCase : List[str] =gen_gaussian_kernel(a_ ,a_ )
__UpperCamelCase : Any =ravel(a_ )
# reshape and get the dst image
__UpperCamelCase : int =dot(a_ ,a_ ).reshape(a_ ,a_ ).astype(a_ )
return dst
if __name__ == "__main__":
# read original image
A_ :Any = imread(R'''../image_data/lena.jpg''')
# turn the image into grayscale values
A_ :Any = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask sizes
A_ :Any = gaussian_filter(gray, 3, sigma=1)
A_ :Any = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
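# --- Illustrative aside -------------------------------------------------------
# Minimal sketch of the im2col step used above: each k x k window becomes one
# row, so the whole filter reduces to a single matrix-vector product.
import numpy as np

image = np.arange(16, dtype=float).reshape(4, 4)
k = 3
rows = [image[i:i + k, j:j + k].ravel() for i in range(2) for j in range(2)]
windows = np.stack(rows)              # shape (4, 9): one row per output pixel
kernel = np.full(k * k, 1 / (k * k))  # a box filter stands in for the Gaussian
print(windows @ kernel)               # the four filtered output values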
| 71
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Dict = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[str]:
lowerCamelCase__ : str = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
lowerCamelCase__ : Optional[Any] = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
lowerCamelCase__ : List[str] = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCamelCase__ : Dict = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCamelCase__ : Tuple = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_UpperCAmelCase )-1}""" )
if "norm" in key:
lowerCamelCase__ : str = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCamelCase__ : Dict = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
lowerCamelCase__ : str = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_UpperCAmelCase )-1}""" )
if "layer_norm1" in key:
lowerCamelCase__ : Optional[int] = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCamelCase__ : Optional[int] = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCamelCase__ : List[Any] = key[key.find('block' ) + len('block' )]
lowerCamelCase__ : int = key.replace(F"""block{idx}""" , F"""block.{int(_UpperCAmelCase )-1}""" )
if "attn.q" in key:
lowerCamelCase__ : Union[str, Any] = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCamelCase__ : Union[str, Any] = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCamelCase__ : Dict = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCamelCase__ : Dict = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCamelCase__ : Any = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCamelCase__ : Dict = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCamelCase__ : Tuple = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCamelCase__ : List[str] = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCamelCase__ : Optional[Any] = key[key.find('linear_c' ) + len('linear_c' )]
lowerCamelCase__ : Dict = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_UpperCAmelCase )-1}""" )
if "bot_conv" in key:
lowerCamelCase__ : str = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
lowerCamelCase__ : Union[str, Any] = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
lowerCamelCase__ : List[Any] = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
lowerCamelCase__ : Optional[int] = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
lowerCamelCase__ : Union[str, Any] = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
lowerCamelCase__ : List[Any] = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
lowerCamelCase__ : str = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
lowerCamelCase__ : Dict = key.replace('module.last_layer_depth' , 'head.head' )
lowerCamelCase__ : str = value
return new_state_dict
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCamelCase__ : Any = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
lowerCamelCase__ : Optional[Any] = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
lowerCamelCase__ : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
lowerCamelCase__ : Optional[int] = kv_bias[: config.hidden_sizes[i]]
lowerCamelCase__ : Any = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCamelCase__ : Dict = kv_bias[config.hidden_sizes[i] :]
def SCREAMING_SNAKE_CASE ( ) -> str:
lowerCamelCase__ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ : Tuple = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return image
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=None ) -> Optional[int]:
lowerCamelCase__ : str = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCamelCase__ : Union[str, Any] = GLPNImageProcessor()
# prepare image
lowerCamelCase__ : str = prepare_img()
lowerCamelCase__ : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
lowerCamelCase__ : Any = torch.load(_UpperCAmelCase , map_location=torch.device('cpu' ) )
# rename keys
lowerCamelCase__ : str = rename_keys(_UpperCAmelCase )
# key and value matrices need special treatment
read_in_k_v(_UpperCAmelCase , _UpperCAmelCase )
# create HuggingFace model and load state dict
lowerCamelCase__ : Dict = GLPNForDepthEstimation(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
# forward pass
lowerCamelCase__ : List[str] = model(_UpperCAmelCase )
lowerCamelCase__ : Tuple = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCamelCase__ : List[Any] = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
lowerCamelCase__ : List[str] = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
lowerCamelCase__ : Tuple = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=_UpperCAmelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_UpperCAmelCase , )
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
_UpperCAmelCase : int = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
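# --- Illustrative aside -------------------------------------------------------
# Hedged sketch of the split performed by read_in_k_v above: a fused
# (2 * hidden, hidden) key/value projection is cut along the first axis into
# separate key and value weights.
import torch

hidden = 4
kv_weight = torch.randn(2 * hidden, hidden)
k_weight, v_weight = kv_weight[:hidden, :], kv_weight[hidden:, :]
assert k_weight.shape == v_weight.shape == (hidden, hidden)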
| 50
| 0
|
"""simple docstring"""
from collections import defaultdict
def snake_case_ ( A_ : int ):
'''simple docstring'''
_lowerCamelCase : Dict = 1
_lowerCamelCase : List[Any] = True
for v in tree[start]:
if v not in visited:
ret += dfs(A_ )
if ret % 2 == 0:
cuts.append(A_ )
return ret
def snake_case_ ( ):
'''simple docstring'''
dfs(1 )
if __name__ == "__main__":
lowerCAmelCase__ , lowerCAmelCase__ = 10, 9
lowerCAmelCase__ = defaultdict(list)
lowerCAmelCase__ = {}
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
lowerCAmelCase__ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
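# --- Illustrative aside -------------------------------------------------------
# Self-contained sketch of the even-tree idea above, with node 1 as the root:
# an edge can be cut exactly when the subtree hanging below it has even size.
from collections import defaultdict

edge_list = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
adj = defaultdict(list)
for u, v in edge_list:
    adj[u].append(v)
    adj[v].append(u)

removable = []

def subtree_size(node, parent):
    size = 1
    for child in adj[node]:
        if child != parent:
            child_size = subtree_size(child, node)
            if child_size % 2 == 0:
                removable.append((node, child))
            size += child_size
    return size

subtree_size(1, None)
print(len(removable))  # 2, matching the len(cuts) - 1 printed above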
| 72
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase :
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any]=99 , UpperCAmelCase : str=13 , UpperCAmelCase : List[str]=7 , UpperCAmelCase : str=9 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : str=True , UpperCAmelCase : Any=False , UpperCAmelCase : Union[str, Any]=32 , UpperCAmelCase : List[str]=5 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : Union[str, Any]=37 , UpperCAmelCase : int=8 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Any=0.0_0_2 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[Any]=None , ) -> Union[str, Any]:
lowerCamelCase__ : int = parent
lowerCamelCase__ : Any = batch_size
lowerCamelCase__ : Optional[int] = encoder_seq_length
lowerCamelCase__ : int = decoder_seq_length
# For common tests
lowerCamelCase__ : List[str] = self.decoder_seq_length
lowerCamelCase__ : Optional[int] = is_training
lowerCamelCase__ : List[Any] = use_attention_mask
lowerCamelCase__ : Optional[Any] = use_labels
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : str = d_ff
lowerCamelCase__ : Optional[Any] = relative_attention_num_buckets
lowerCamelCase__ : Any = dropout_rate
lowerCamelCase__ : Any = initializer_factor
lowerCamelCase__ : Union[str, Any] = eos_token_id
lowerCamelCase__ : List[str] = pad_token_id
lowerCamelCase__ : List[str] = decoder_start_token_id
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Optional[Any] = decoder_layers
def A_ ( self : List[Any] ) -> int:
return TaConfig.from_pretrained('google/umt5-base' )
def A_ ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : str=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[Any]=None , ) -> List[str]:
if attention_mask is None:
lowerCamelCase__ : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCamelCase__ : Optional[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCamelCase__ : int = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase )
if decoder_head_mask is None:
lowerCamelCase__ : Dict = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
if cross_attn_head_mask is None:
lowerCamelCase__ : Dict = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def A_ ( self : str ) -> List[str]:
lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad tokens in between;
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and the rest are between 2..seq_length,
# and the seq_length here is seq_length - num_pad_tokens.
# But when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn
# results in position_ids being off by num_pad_tokens in the past input
lowerCamelCase__ : List[str] = input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Union[str, Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Dict = self.get_config()
lowerCamelCase__ : Tuple = config.num_attention_heads
lowerCamelCase__ : Any = self.prepare_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return config, input_dict
def A_ ( self : Tuple ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def A_ ( self : Optional[int] ) -> List[str]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def A_ ( self : Union[str, Any] ) -> Dict:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def A_ ( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Dict , ) -> str:
lowerCamelCase__ : Dict = UMTaModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Optional[int] = model(
input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , )
lowerCamelCase__ : Any = model(input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase )
lowerCamelCase__ : Dict = result.last_hidden_state
lowerCamelCase__ : Any = result.past_key_values
lowerCamelCase__ : List[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def A_ ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , ) -> Optional[int]:
lowerCamelCase__ : List[Any] = UMTaModel(config=UpperCAmelCase ).get_decoder().to(UpperCAmelCase ).eval()
# first forward pass
lowerCamelCase__ : Tuple = model(UpperCAmelCase , use_cache=UpperCAmelCase )
lowerCamelCase__ : List[Any] = model(UpperCAmelCase )
lowerCamelCase__ : int = model(UpperCAmelCase , use_cache=UpperCAmelCase )
self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) )
self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) + 1 )
lowerCamelCase__ , lowerCamelCase__ : Dict = outputs.to_tuple()
# create a hypothetical next token and extend next_input_ids with it
lowerCamelCase__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append the next tokens to the input_ids
lowerCamelCase__ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase__ : List[str] = model(UpperCAmelCase )['last_hidden_state']
lowerCamelCase__ : str = model(UpperCAmelCase , past_key_values=UpperCAmelCase )['last_hidden_state']
# select random slice
lowerCamelCase__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase__ : Tuple = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCamelCase__ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
    def create_and_check_model_fp16_forward(
        self , config , input_dict , ):
        model = UMTaModel(config=config ).to(torch_device ).half().eval()
        output = model(**input_dict )['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output ).any().item() )
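# Hedged aside (not part of the original test suite; names below are invented):
# `create_and_check_decoder_model_past` above relies on a general property of
# KV-cached decoding. A toy causal "model" -- a cumulative sum over time -- is
# enough to see it: resuming from a cached state must match recomputing the
# full sequence. `torch` is assumed imported at the top of this file, as it is
# used throughout.
def _toy_cache_equivalence_demo():
    x = torch.randn(2, 5, 4)
    full = torch.cumsum(x, dim=1)   # full forward pass over all 5 timesteps
    cache = full[:, -2, :]          # "past" state after the first 4 tokens
    step = cache + x[:, -1, :]      # incremental step resumed from the cache
    assert torch.allclose(full[:, -1, :], step, atol=1e-6)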
@require_torch
class UMTaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self ):
        self.model_tester = UMTaModelTester(self )
    @unittest.skip('Test has a segmentation fault on torch 1.8.0' )
    def test_export_to_onnx(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0] ).to(torch_device )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=True , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
    @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
    def test_model_fp16_forward(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs )
    def test_generate_with_head_masking(self ):
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking = {
            'head_mask': torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['decoder_head_mask'] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )
            out = model.generate(
                config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
    def test_disk_offload(self ):
        pass
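# Hedged aside (illustrative, not from the original file): what a head mask
# does inside attention, in miniature. A 0 entry multiplies that head's
# attention probabilities to zero, which is exactly what
# test_generate_with_head_masking asserts on the returned attention tensors.
def _toy_head_mask_demo():
    q = torch.randn(1, 2, 3, 4)                       # (batch, heads, seq, dim)
    k = torch.randn(1, 2, 3, 4)
    probs = torch.einsum('bhqd,bhkd->bhqk', q, k).softmax(dim=-1)
    head_mask = torch.tensor([0.0, 1.0])              # silence head 0, keep head 1
    masked = probs * head_mask[None, :, None, None]   # broadcast over batch/query/key
    assert masked[:, 0].sum().item() == 0.0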
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaIntegrationTest(unittest.TestCase ):
    @slow
    @unittest.skip(
        'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def test_small_integration_test(self ):
        model = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=False , legacy=False )
        input_text = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
        input_ids = tokenizer(input_text , return_tensors='pt' , padding=True ).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
                [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ] )
# fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling , EXPECTED_FILLING )
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'negative_prompt',
        'height',
        'width',
        'negative_prompt_embeds',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=1000 , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'An astronaut riding an elephant',
            'source_prompt': 'An astronaut riding a horse',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'eta': 0.1,
            'strength': 0.8,
            'guidance_scale': 3,
            'source_guidance_scale': 1,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_cycle(self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_cycle_fp16(self ):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module , 'half' ):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    @skip_mps
    def test_save_load_local(self ):
        return super().test_save_load_local()

    @unittest.skip('non-deterministic pipeline' )
    def test_inference_batch_single_identical(self ):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self ):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self ):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self ):
        return super().test_attention_slicing_forward_pass()
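# Hedged aside (illustrative, not part of the original test file): the fast
# tests above use a slice-regression idiom rather than a full-image comparison.
# With NHWC float outputs, a 3x3 corner patch of the last channel is checked
# against frozen reference values; the stand-in arrays below just show the
# mechanics.
def _toy_slice_regression_demo():
    images = np.zeros((1, 32, 32, 3), dtype=np.float32)   # stand-in pipeline output
    image_slice = images[0, -3:, -3:, -1]                 # 3x3 patch, last channel
    expected_slice = np.zeros(9, dtype=np.float32)        # frozen reference values
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2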
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/cycle-diffusion/black_colored_car.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
        init_image = init_image.resize((512, 512) )
        model_id = 'CompVis/stable-diffusion-v1-4'
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder='scheduler' )
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , safety_checker=None , torch_dtype=torch.float16 , revision='fp16' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        source_prompt = 'A black colored car'
        prompt = 'A blue colored car'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type='np' , )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5e-1
    def test_cycle_diffusion_pipeline(self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/cycle-diffusion/black_colored_car.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
        init_image = init_image.resize((512, 512) )
        model_id = 'CompVis/stable-diffusion-v1-4'
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder='scheduler' )
        pipe = CycleDiffusionPipeline.from_pretrained(model_id , scheduler=scheduler , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        source_prompt = 'A black colored car'
        prompt = 'A blue colored car'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type='np' , )
        image = output.images
        assert np.abs(image - expected_image ).max() < 2e-2
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function , args=() , num_processes=None , mixed_precision='no' , use_port='29500' ):
    in_colab = False
    in_kaggle = False
    if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            F"""Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
    if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , None ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type='TPU' )
        print(F"""Launching a training on {num_processes} TPU cores.""" )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
        function(*args )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes , master_addr='127.0.0.1' , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type='MULTI_GPU' )
                print(F"""Launching training on {num_processes} GPUs.""" )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # assumption: the env-var name was lost in extraction
                print('Launching training on MPS.' )
            elif torch.cuda.is_available():
                print('Launching training on one GPU.' )
            else:
                print('Launching training on CPU.' )
            function(*args )
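# Hedged usage sketch for notebook_launcher (the training function, its
# arguments, and the process count below are invented for illustration):
#
#   def training_loop(learning_rate, num_epochs):
#       ...  # build the Accelerator *inside* this function, then train
#
#   notebook_launcher(training_loop, args=(1e-3, 5), num_processes=2, mixed_precision="fp16")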
def debug_launcher(function , args=() , num_processes=2 ):
    from torch.multiprocessing import start_processes
    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
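# Illustrative sanity check (not part of the original module): the table is a
# bijection over all 256 byte values, so any UTF-8 text round-trips through it.
#
#   byte_encoder = bytes_to_unicode()
#   byte_decoder = {v: k for k, v in byte_encoder.items()}
#   encoded = ''.join(byte_encoder[b] for b in 'hello'.encode('utf-8'))
#   assert bytearray(byte_decoder[c] for c in encoded).decode('utf-8') == 'hello'
#   assert len(byte_encoder) == 256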
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
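# Example (illustrative): get_pairs(('h', 'e', 'l', 'l', 'o')) returns the
# symbol bigrams {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}, which the
# BPE merge loop below repeatedly collapses by merge rank.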
class BlenderbotTokenizer(PreTrainedTokenizer ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self ) -> int:
        return len(self.encoder )

    def get_vocab(self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
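    # Worked example of the merge loop above (toy merge table, not Blenderbot's
    # real merges.txt): with bpe_ranks = {('l', 'l'): 0, ('ll', 'o'): 1},
    # bpe('hello') proceeds ('h','e','l','l','o') -> ('h','e','ll','o')
    # -> ('h','e','llo'); no remaining pair is ranked, so it returns 'h e llo'.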
    def _tokenize(self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens
    def _convert_token_to_id(self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self , index ):
        return self.decoder.get(index )

    def convert_tokens_to_string(self , tokens ):
        text = ''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(
        self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization(self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self , conversation: "Conversation" ) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = ' '.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
        return input_ids
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput(BaseOutput ):
    sample: torch.FloatTensor


class UNetaDModel(ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__(
        self ,
        sample_size: int = 65536 ,
        sample_rate: Optional[int] = None ,
        in_channels: int = 2 ,
        out_channels: int = 2 ,
        extra_in_channels: int = 0 ,
        time_embedding_type: str = "fourier" ,
        flip_sin_to_cos: bool = True ,
        use_timestep_embedding: bool = False ,
        freq_shift: float = 0.0 ,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") ,
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") ,
        mid_block_type: str = "UNetMidBlock1D" ,
        out_block_type: str = None ,
        block_out_channels: Tuple[int] = (32, 32, 64) ,
        act_fn: str = None ,
        norm_num_groups: int = 8 ,
        layers_per_block: int = 1 ,
        downsample_each_block: bool = False ,
    ):
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=False , log=False , flip_sin_to_cos=flip_sin_to_cos )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=flip_sin_to_cos , downscale_freq_shift=freq_shift )
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim , time_embed_dim=time_embed_dim , act_fn=act_fn , out_dim=block_out_channels[0] , )
        self.down_blocks = nn.ModuleList([] )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels ) - 1
            down_block = get_down_block(
                down_block_type , num_layers=layers_per_block , in_channels=input_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(down_block )
        # mid
        self.mid_block = get_mid_block(
            mid_block_type , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=layers_per_block , add_downsample=downsample_each_block , )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types ) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels ) - 1
            up_block = get_up_block(
                up_block_type , num_layers=layers_per_block , in_channels=prev_output_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        self.out_block = get_out_block(
            out_block_type=out_block_type , num_groups_out=num_groups_out , embed_dim=block_out_channels[0] , out_channels=out_channels , act_fn=act_fn , fc_dim=block_out_channels[-1] // 4 , )
    def forward(self , sample: torch.FloatTensor , timestep: Union[torch.Tensor, float, int] , return_dict: bool = True , ) -> Union[UNetaDOutput, Tuple]:
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(sample.device )
        timestep_embed = self.time_proj(timesteps )
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed )
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample , temb=timestep_embed )
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample , timestep_embed )
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks ):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample , res_hidden_states_tuple=res_samples , temb=timestep_embed )
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample , timestep_embed )
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=sample )
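# Hedged usage sketch (values invented; defaults taken from the signature
# above): the 1D UNet maps a (batch, channels, length) sample plus a timestep
# to an output of the same shape.
#
#   net = UNetaDModel(sample_size=256, in_channels=2, out_channels=2)
#   x = torch.randn(4, 2, 256)
#   out = net(x, timestep=10).sample   # shape (4, 2, 256)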
'''simple docstring'''
def calc_profit(profit: list , weight: list , max_weight: int ) -> int:
    """Return the maximum profit reachable with the fractional (greedy) knapsack."""
    if len(profit ) != len(weight ):
        raise ValueError('''The length of profit and weight must be same.''' )
    if max_weight <= 0:
        raise ValueError('''max_weight must be greater than zero.''' )
    if any(p < 0 for p in profit ):
        raise ValueError('''Profit can not be negative.''' )
    if any(w < 0 for w in weight ):
        raise ValueError('''Weight can not be negative.''' )
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit , weight )]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight )
    # declaring useful variables
    length = len(sorted_profit_by_weight )
    limit = 0
    gain = 0
    i = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) and i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight )
        profit_by_weight[index] = -1  # mark as used so duplicates are not picked again
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Add the full profit for this item, i.e. 1 * profit[index]
            # (weight[index] / weight[index] == 1, since the whole item fits).
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
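# Worked example (classic textbook values, not from this module): the greedy
# takes items 1 and 2 whole and 20/30 of item 3: 60 + 100 + 80 = 240.
assert calc_profit([60, 100, 120], [10, 20, 30], 50) == 240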
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
    profit = [int(x) for x in input("""Input profits separated by spaces: """).split()]
    weight = [int(x) for x in input("""Input weights separated by spaces: """).split()]
    max_weight = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
from __future__ import annotations
def get_valid_pos(position: tuple[int, int] , n: int ) -> list[tuple[int, int]]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position )
    return permissible_positions


def is_complete(board: list[list[int]] ) -> bool:
    return not any(elem == 0 for row in board for elem in row )


def open_knight_tour_helper(board: list[list[int]] , pos: tuple[int, int] , curr: int ) -> bool:
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int ) -> list[list[int]]:
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F"""Open Knight Tour cannot be performed on a board of size {n}"""
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
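# Usage sketch (illustrative): an open tour exists on a 5x5 board; the returned
# matrix numbers each square by visit order, so its values are exactly 1..25.
#
#   board = open_knight_tour(5)
#   assert sorted(v for row in board for v in row) == list(range(1, 26))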
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig(PretrainedConfig ):
    '''simple docstring'''

    model_type = 'git_vision_model'
    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type" ) == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig(PretrainedConfig ):
    '''simple docstring'''

    model_type = 'git'
    def __init__( self , vision_config=None , vocab_size=30522 , hidden_size=768 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=101 , eos_token_id=102 , num_image_with_embedding=None , **kwargs , ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
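# Hedged usage sketch (illustrative): GitConfig nests a GitVisionConfig, so the
# defaults above compose and round-trip through to_dict():
#
#   config = GitConfig()
#   config.vision_config.num_hidden_layers   # -> 12 (GitVisionConfig default)
#   config.to_dict()["vision_config"]        # nested dict produced by to_dict() above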
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx ):
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx , cnt ):
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx ):
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def final():
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('cpu' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_84,
type=int,
help="""Input Image Size""",
)
    parser.add_argument(
        """--cvt_file_name""",
        default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
        type=str,
        help="""Path to the original CvT checkpoint file.""",
    )
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__( self , conv1_get , size_p1 , bp_num1 , bp_num2 , bp_num3 , rate_w=0.2 , rate_t=0.2 ):
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0] , self.conv1[0] ) + 0.5 )
            for i in range(self.conv1[1] )
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3 , self.num_bp2 ) + 0.5 )
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2 , self.num_bp1 ) + 0.5 )
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1] ) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2 ) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3 ) + 1
    def save_model(self , save_path ):
        # save model dict with pickle
        model_dic = {
            'num_bp1': self.num_bp1,
            'num_bp2': self.num_bp2,
            'num_bp3': self.num_bp3,
            'conv1': self.conv1,
            'step_conv1': self.step_conv1,
            'size_pooling1': self.size_pooling1,
            'rate_weight': self.rate_weight,
            'rate_thre': self.rate_thre,
            'w_conv1': self.w_conv1,
            'wkj': self.wkj,
            'vji': self.vji,
            'thre_conv1': self.thre_conv1,
            'thre_bp2': self.thre_bp2,
            'thre_bp3': self.thre_bp3,
        }
        with open(save_path , 'wb' ) as f:
            pickle.dump(model_dic , f )
        print(f"""Model saved: {save_path}""" )
    @classmethod
    def ReadModel(cls , model_path ):
        # read saved model
        with open(model_path , 'rb' ) as f:
            model_dic = pickle.load(f )  # noqa: S301
        conv_get = model_dic.get('conv1' )
        conv_get.append(model_dic.get('step_conv1' ) )
        size_p1 = model_dic.get('size_pooling1' )
        bp1 = model_dic.get('num_bp1' )
        bp2 = model_dic.get('num_bp2' )
        bp3 = model_dic.get('num_bp3' )
        r_w = model_dic.get('rate_weight' )
        r_t = model_dic.get('rate_thre' )
        # create model instance
        conv_ins = CNN(conv_get , size_p1 , bp1 , bp2 , bp3 , r_w , r_t )
        # modify model parameters
        conv_ins.w_conv1 = model_dic.get('w_conv1' )
        conv_ins.wkj = model_dic.get('wkj' )
        conv_ins.vji = model_dic.get('vji' )
        conv_ins.thre_conv1 = model_dic.get('thre_conv1' )
        conv_ins.thre_bp2 = model_dic.get('thre_bp2' )
        conv_ins.thre_bp3 = model_dic.get('thre_bp3' )
        return conv_ins
    def sig(self , x ):
        return 1 / (1 + np.exp(-1 * x ))

    def do_round(self , x ):
        return round(x , 3 )
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slices of the original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and save as a list of matrices
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expanding the data slices to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
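    # Shape check for the convolution above (a worked example, not original code):
    # a 28x28 input with a 5x5 kernel and conv_step=1 gives
    # size_feature_map = int((28 - 5) / 1 + 1) = 24, i.e. one 24x24 feature map
    # per kernel.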
def _UpperCAmelCase ( self , a , a , a="average_pool" ) -> str:
# pooling process
lowercase__ : List[str] = len(featuremaps[0] )
lowercase__ : List[str] = int(size_map / size_pooling )
lowercase__ : str = []
for i_map in range(len(a ) ):
lowercase__ : List[str] = featuremaps[i_map]
lowercase__ : Optional[int] = []
for i_focus in range(0 , a , a ):
for j_focus in range(0 , a , a ):
lowercase__ : List[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(a ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(a ) )
lowercase__ : List[Any] = np.asmatrix(a ).reshape(a , a )
featuremap_pooled.append(a )
return featuremap_pooled
    def _expand(self, data):
        # expanding three-dimensional data to a one-dimensional list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded
    def _expand_mat(self, data_mat):
        # expanding a matrix to a one-dimensional list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """
        Calculate the gradient from the data slices of the pooling layer.
        pd_pool: list of matrix
        out_map: the shape of the data slice (size_map * size_map)
        return: pd_all: list of matrix, [num, size_map, size_map]
        """
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=False):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # fully connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the summed error over all single images
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ', data_teach)
                # print('   ----BP_output  ', bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Completed---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        # return the data of the image after the convolution process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
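    # A minimal usage sketch (hypothetical toy shapes, left commented out so the
    # module stays import-safe): with a 28x28 input, 8 kernels of size 5x5 at
    # step 1 and 2x2 pooling, the flatten layer has 8 * 12 * 12 = 1152 units.
    #
    # cnn = CNN(conv1_get=[5, 8, 1], size_p1=2, bp_num1=1152, bp_num2=30, bp_num3=10)
    # train_imgs = [np.random.rand(28, 28)]
    # train_targets = [np.eye(10)[3]]
    # cnn.train(1, train_imgs, train_targets, n_repeat=5, error_accuracy=0.5, draw_e=False)
    # print(cnn.predict(train_imgs))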
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
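# Typical usage, via the top-level `accelerate` CLI that registers this
# subparser (a sketch; only the `--config_file` flag defined above is assumed):
#
#   accelerate test
#   accelerate test --config_file path/to/default_config.yaml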
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
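# With the lazy structure above, a downstream import such as
#
#   from transformers.models.longt5 import LongT5Model
#
# resolves through `_LazyModule`, so `modeling_longt5` (and the torch
# availability check) only runs when the attribute is first accessed.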
def solution(limit: int = 1_000_000) -> int:
    """
    Project Euler Problem 135 (https://projecteuler.net/problem=135): count the
    values of n below `limit` for which x**2 - y**2 - z**2 == n has exactly ten
    solutions with x, y, z positive integers in arithmetic progression.
    """
    limit_plus = limit + 1
    frequency = [0] * limit_plus
    for first_term in range(1, limit_plus):
        for n in range(first_term, limit_plus, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d > a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F"""{solution() = }""")
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # `to_kwargs` only returns the fields that differ from the dataclass defaults.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
@require_cuda
    def test_grad_scaler_kwargs(self):
        # If a `GradScalerKwargs` handler is passed, the created scaler should pick up its values
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
@require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 5_12,
"""google/realm-cc-news-pretrained-encoder""": 5_12,
"""google/realm-cc-news-pretrained-scorer""": 5_12,
"""google/realm-cc-news-pretrained-openqa""": 5_12,
"""google/realm-orqa-nq-openqa""": 5_12,
"""google/realm-orqa-nq-reader""": 5_12,
"""google/realm-orqa-wq-openqa""": 5_12,
"""google/realm-orqa-wq-reader""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        r"""
        Encode a batch of candidate texts (or text pairs). Unlike a plain `__call__`, this handles an
        additional num_candidates axis of shape (batch_size, num_candidates, text) and always pads to
        `max_length` so packs of candidates can be stacked into a batch.
        """
        # Always use a padded version of `text` (batch of num_candidates sequences each)
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
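# A minimal sketch of `batch_encode_candidates` (the nested-list input — a
# batch of num_candidates texts — is what differs from a plain tokenizer call;
# the checkpoint name is one of those listed in the maps above):
#
# from transformers import RealmTokenizerFast
#
# tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
# batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
# print(batch["input_ids"].shape)  # (batch_size, num_candidates, max_length)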
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def _UpperCamelCase ( __A , __A="" , __A=None , __A=None ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = 0
UpperCamelCase__ = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(__A ):
index += 1
UpperCamelCase__ = ["\n".join(lines[:index] )]
else:
UpperCamelCase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCamelCase__ = [lines[index]]
index += 1
while index < len(__A ) and (end_prompt is None or not lines[index].startswith(__A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(__A ) )
if index < len(__A ) - 1:
UpperCamelCase__ = [lines[index + 1]]
index += 1
else:
UpperCamelCase__ = []
else:
blocks.append("\n".join(__A ) )
UpperCamelCase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__A ) > 0:
blocks.append("\n".join(__A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__A ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wraps a `key` (that maps an object to a string) to lower case and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
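# A quick illustration of the ordering rule above (not part of the original
# script): constants first, then classes, then functions, each group sorted
# case-insensitively with underscores ignored.
#
#   sort_objects(["my_func", "MyClass", "MY_CONST"])
#   # -> ["MY_CONST", "MyClass", "my_func"]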
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
a__ : Union[str, Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
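# Typical invocations (assuming this script is saved as utils/custom_init_isort.py
# at the repository root; the path is an assumption, not stated in this file):
#
#   python utils/custom_init_isort.py               # rewrite __init__.py files in place
#   python utils/custom_init_isort.py --check_only  # only report, as done in CI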
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """
    A single training/test example for simple sequence classification.

    Args:
        guid: Unique id for the example.
        text_a: string. The untokenized text of the first sequence.
        text_b: (Optional) string. The untokenized text of the second sequence.
        label: (Optional) string. The label of the example.
        pairID: (Optional) string. Unique identifier for the pair of sentences.
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """
    A single set of features of data. Property names are the same as the corresponding inputs to a model.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """
    Loads a data file into a list of ``InputFeatures``.
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
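# A minimal usage sketch (the data path and model name below are placeholders,
# not part of this module):
#
# from transformers import AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# dataset = HansDataset(
#     data_dir="/path/to/hans",
#     tokenizer=tokenizer,
#     task="hans",
#     max_seq_length=128,
#     evaluate=False,
# )
# print(len(dataset), dataset.get_labels())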