"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    """Return NAND of two binary inputs: 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    test_nand_gate()
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
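
# --- Hedged composition sketch (not part of the original file) ---
# NAND is functionally complete: the NOT and AND gates below are derived from
# nand_gate alone, via De Morgan's laws.
def not_gate_via_nand(input_1: int) -> int:
    # NOT a == NAND(a, a)
    return nand_gate(input_1, input_1)


def and_gate_via_nand(input_1: int, input_2: int) -> int:
    # a AND b == NOT(NAND(a, b))
    return not_gate_via_nand(nand_gate(input_1, input_2))


assert not_gate_via_nand(0) == 1 and not_gate_via_nand(1) == 0
assert and_gate_via_nand(1, 1) == 1 and and_gate_via_nand(1, 0) == 0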
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9_500,
        num_object_labels=1_600,
        num_attr_labels=400,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2_048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
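
if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): the per-modality layer
    # counts are exposed through `num_hidden_layers`, built from the r/x/l_layers
    # arguments above.
    config = LxmertConfig(num_qa_labels=2)
    print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}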
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs
    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1  # first row
        fake_binary_mask[1, :15] = 1  # second row
        fake_binary_mask[5, :10] = 1  # fifth row
        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
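    # Hedged sanity check (not part of the original file): the first six primes
    # are 2, 3, 5, 7, 11, 13, so the sixth prime must be 13.
    assert solution(6) == 13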
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    """simple docstring"""

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']`."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
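
# Hedged usage note (not part of the original file): because of the _LazyModule
# swap above, `Mask2FormerConfig` and friends are only imported on first access,
# e.g. via `from transformers.models.mask2former import Mask2FormerConfig`.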
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
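
if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): `deduplicate_dataset`
    # expects a `datasets.Dataset` with "content", "repo_name" and "path" columns.
    # Snippets shorter than MIN_NUM_TOKENS tokens are skipped by `get_min_hash`,
    # so this toy example mainly exercises the plumbing end to end.
    toy = Dataset.from_dict(
        {
            "content": ["def add(a, b):\n    return a + b"] * 2 + ["print('hello world')"],
            "repo_name": ["repo1", "repo2", "repo3"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    filtered, clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)
    print(len(filtered), clusters)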
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
lowercase_ = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowercase_ = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowercase_ = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowercase_ = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
def test_custom_files_are_present(transformers_path: Path) -> bool:
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
"""simple docstring"""
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce the fraction with Euclid's algorithm for the GCD
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    """simple docstring"""

    def __init__(
        self,
        vae,
        text_encoder,
        clip_model,
        tokenizer,
        unet,
        scheduler,
        feature_extractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.vae , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.vae , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.unet , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.unet , _a )
def __UpperCAmelCase ( self , _a , _a , _a ):
# get the original timestep using init_timestep
__a = min(int(num_inference_steps * strength ) , _a )
__a = max(num_inference_steps - init_timestep , 0 )
__a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)

        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__(
        self,
        style_image,
        content_image,
        style_prompt=None,
        content_prompt=None,
        height=512,
        width=512,
        noise_strength=0.6,
        num_inference_steps=50,
        guidance_scale=7.5,
        batch_size=1,
        eta=0.0,
        clip_guidance_scale=100,
        generator=None,
        output_type="pil",
        return_dict=True,
        slerp_latent_style_strength=0.8,
        slerp_prompt_style_strength=0.1,
        slerp_clip_image_style_strength=0.1,
    ):
if isinstance(_a , _a ) and len(_a ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(_a )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(_a , torch.Generator ) and batch_size > 1:
__a = [generator] + [None] * (batch_size - 1)
__a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
__a = [x[0] for x in coca_is_none if x[1]]
__a = ''', '''.join(_a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_a ):
                raise ValueError(
                    f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    f''' Set a prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''' )
__a = self.get_image_description(_a )
if style_prompt is None:
if len(_a ):
                raise ValueError(
                    f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    f''' Set a prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''' )
__a = self.get_image_description(_a )
# get prompt text embeddings for content and style
__a = self.tokenizer(
_a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
__a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__a = self.tokenizer(
_a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
__a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__a = slerp(_a , _a , _a )
# duplicate text embeddings for each generation per prompt
__a = text_embeddings.repeat_interleave(_a , dim=0 )
# set timesteps
__a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__a = {}
if accepts_offset:
__a = 1
self.scheduler.set_timesteps(_a , **_a )
# Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
__a , __a = self.get_timesteps(_a , _a , self.device )
__a = timesteps[:1].repeat(_a )
# Preprocess image
__a = preprocess(_a , _a , _a )
__a = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
__a = preprocess(_a , _a , _a )
__a = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
__a = slerp(_a , _a , _a )
if clip_guidance_scale > 0:
__a = self.get_clip_image_embeddings(_a , _a )
__a = self.get_clip_image_embeddings(_a , _a )
__a = slerp(
_a , _a , _a )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a = content_text_input.input_ids.shape[-1]
__a = self.tokenizer([''''''] , padding='''max_length''' , max_length=_a , return_tensors='''pt''' )
__a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__a = uncond_embeddings.repeat_interleave(_a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__a = torch.randn(_a , generator=_a , device='''cpu''' , dtype=_a ).to(
self.device )
else:
__a = torch.randn(_a , generator=_a , device=self.device , dtype=_a )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
# check if the scheduler accepts generator
__a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__a = generator
with self.progress_bar(total=_a ):
for i, t in enumerate(_a ):
# expand the latents if we are doing classifier free guidance
__a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a , encoder_hidden_states=_a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__a , __a = noise_pred.chunk(2 )
__a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__a , __a = self.cond_fn(
_a , _a , _a , _a , _a , _a , _a , )
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 1 / 0.1_8215 * latents
__a = self.vae.decode(_a ).sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a )
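# For reference, a minimal version of the spherical distance loss used by
# `cond_fn` above (a sketch of the common CLIP-guidance formulation; the
# pipeline's actual `spherical_dist_loss` helper is defined earlier in the file):
#
# def spherical_dist_loss(x, y):
#     x = torch.nn.functional.normalize(x, dim=-1)
#     y = torch.nn.functional.normalize(y, dim=-1)
#     return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)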
| 65 | 1 |
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class __lowerCAmelCase :
'''simple docstring'''
@property
def __UpperCAmelCase ( self ):
return self.get_dummy_input()
@property
def __UpperCAmelCase ( self ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def __UpperCAmelCase ( self , _a=True , _a=False , _a=False , _a=False , ):
__a = 4
__a = 32
__a = (32, 32)
__a = torch.manual_seed(0 )
__a = torch.device(_a )
__a = (batch_size, num_channels) + sizes
__a = randn_tensor(_a , generator=_a , device=_a )
__a = {'''hidden_states''': hidden_states}
if include_temb:
__a = 128
__a = randn_tensor((batch_size, temb_channels) , generator=_a , device=_a )
if include_res_hidden_states_tuple:
__a = torch.manual_seed(1 )
__a = (randn_tensor(_a , generator=_a , device=_a ),)
if include_encoder_hidden_states:
__a = floats_tensor((batch_size, 32, 32) ).to(_a )
if include_skip_sample:
__a = randn_tensor(((batch_size, 3) + sizes) , generator=_a , device=_a )
return dummy_input
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
__a = 32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self , _a ):
__a , __a = self.prepare_init_args_and_inputs_for_common()
__a = self.block_class(**_a )
unet_block.to(_a )
unet_block.eval()
with torch.no_grad():
__a = unet_block(**_a )
if isinstance(_a , _a ):
__a = output[0]
self.assertEqual(output.shape , self.output_shape )
__a = output[0, -1, -3:, -3:]
__a = torch.tensor(_a ).to(_a )
assert torch_all_close(output_slice.flatten() , _a , atol=5E-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def __UpperCAmelCase ( self ):
__a , __a = self.prepare_init_args_and_inputs_for_common()
__a = self.block_class(**_a )
model.to(_a )
model.train()
__a = model(**_a )
if isinstance(_a , _a ):
__a = output[0]
__a = torch.device(_a )
__a = randn_tensor(output.shape , device=_a )
__a = torch.nn.functional.mse_loss(_a , _a )
loss.backward()
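# Determinism sketch: `randn_tensor` with an identically seeded generator
# reproduces the fixtures above exactly, which is what hardcoded output slices
# in subclasses rely on. Illustrative only:
#
# g = torch.manual_seed(0)
# a = randn_tensor((2, 3), generator=g)
# g = torch.manual_seed(0)
# b = randn_tensor((2, 3), generator=g)
# assert torch_all_close(a, b)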
| 65 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption ( parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 65 | 1 |
"""simple docstring"""
import operator as op
def solve ( post_fix : list ) -> int:
    stack = []
    div = lambda x , y : int(x / y ) # noqa: E731 integer division operation
    opr = {
        '''^''': op.pow,
        '''*''': op.mul,
        '''/''': div,
        '''+''': op.add,
        '''-''': op.sub,
    } # operators & their respective operation
    # print table header
    print('''Symbol'''.center(8 ) , '''Action'''.center(12 ) , '''Stack''' , sep=''' | ''' )
    print('''-''' * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit(): # if x is a digit
            stack.append(x ) # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
        else:
            b = stack.pop() # pop stack
            # output in tabular format
            print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
            a = stack.pop() # pop stack
            # output in tabular format
            print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' , )
    return int(stack[0] )
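# Worked example: the postfix expression "5 6 9 * +" evaluates to 59
# (6 * 9 = 54, then 5 + 54 = 59), e.g. solve("5 6 9 * +".split()) -> 59.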
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix))
| 65 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase_ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__UpperCAmelCase : Any = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__UpperCAmelCase : Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__UpperCAmelCase : Dict = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = ZeroShotClassificationPipeline(
            model=_a , tokenizer=_a , candidate_labels=['''politics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __UpperCAmelCase ( self , _a , _a ):
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
# No kwarg
__a = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__a = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
# https://github.com/huggingface/transformers/issues/13846
__a = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_a , [
{'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]}
for i in range(1 )
] , )
__a = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_a , [
{'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]}
for i in range(2 )
] , )
with self.assertRaises(_a ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(_a ):
classifier(_a , candidate_labels='''politics''' )
with self.assertRaises(_a ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(_a ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=_a )
with self.assertRaises(_a ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(_a ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=_a , )
self.run_entailment_id(_a )
def __UpperCAmelCase ( self , _a ):
__a = zero_shot_classifier.model.config
__a = config.labelaid
__a = zero_shot_classifier.entailment_id
__a = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
__a = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__a = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__a = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
__a = original_labelaid
self.assertEqual(_a , zero_shot_classifier.entailment_id )
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__a = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_a , )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def __UpperCAmelCase ( self ):
__a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__a = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_a , )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
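# Minimal usage sketch of the pipeline under test (illustrative; any
# NLI-trained checkpoint works, the model name below is just a common choice):
#
# from transformers import pipeline
# classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# classifier("Who are you voting for in 2020?", candidate_labels=["politics", "science"])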
| 65 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
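# Usage sketch: with the lazy module installed in sys.modules, importing the
# package stays cheap and heavy backends are only resolved on first access:
#
# from transformers import Mask2FormerConfig  # does not import torch yet
# config = Mask2FormerConfig()                # symbol resolved lazily here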
| 65 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase ( monkeypatch ) -> None:
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def lowercase ( monkeypatch ) -> None:
    class MetricMock :
        '''simple docstring'''
        def __init__( self , metric_id ):
            self.id = metric_id
    class HfhMock :
        '''simple docstring'''
        _metrics = [MetricMock(metric_id ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
        def list_metrics ( self ):
            return self._metrics
    monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
'''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def lowercase ( func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ) -> None:
    # the two middle parameters consume the fixtures above
    if "tmp_path" in args:
        args = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match='''https://huggingface.co/docs/evaluate''' ):
        func(*args )
| 65 | 1 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = 'vision-encoder-decoder'
__UpperCAmelCase : Union[str, Any] = True
def __init__( self , **_a ):
super().__init__(**_a )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
                f'''A configuration of type {self.model_type} requires both `encoder` and `decoder` '''
                f'''sub-configurations to be passed, but only {kwargs} was given.''' )
__a = kwargs.pop('''encoder''' )
__a = encoder_config.pop('''model_type''' )
__a = kwargs.pop('''decoder''' )
__a = decoder_config.pop('''model_type''' )
__a = AutoConfig.for_model(_a , **_a )
__a = AutoConfig.for_model(_a , **_a )
__a = True
@classmethod
def __UpperCAmelCase ( cls , _a , _a , **_a ):
logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
__a = True
__a = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_a )
def __UpperCAmelCase ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.encoder.to_dict()
__a = self.decoder.to_dict()
__a = self.__class__.model_type
return output
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : int = version.parse('1.11' )
@property
def __UpperCAmelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __UpperCAmelCase ( self ):
return 1E-4
@property
def __UpperCAmelCase ( self ):
return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ):
__a = OrderedDict()
__a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
__a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
__a = {0: '''batch''', 1: '''encoder_sequence'''}
return common_inputs
def __UpperCAmelCase ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ):
import torch
__a = OrderedDict()
__a = super().generate_dummy_inputs(
_a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a )
__a , __a = dummy_input['''input_ids'''].shape
__a = (batch, encoder_sequence, self._config.encoder_hidden_size)
__a = dummy_input.pop('''input_ids''' )
__a = dummy_input.pop('''attention_mask''' )
__a = torch.zeros(_a )
return common_inputs
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self , _a ):
return VisionEncoderDecoderEncoderOnnxConfig(_a )
def __UpperCAmelCase ( self , _a , _a , _a = "default" ):
__a = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_a , _a )
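# Usage sketch (hypothetical encoder/decoder choices) mirroring the
# `from_encoder_decoder_configs` classmethod above:
#
# from transformers import ViTConfig, GPT2Config, VisionEncoderDecoderConfig
# config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(
#     ViTConfig(), GPT2Config()
# )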
| 65 |
"""simple docstring"""
def lowercase ( number : int ) -> int:
    if not isinstance(number , int ) or number < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    count = 0
    while number:
        # Clearing the lowest set bit (number &= number - 1) jumps straight to
        # the next `1`, so the loop runs once per set bit instead of 32 times.
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
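    # Worked example (illustrative): 13 == 0b1101 has three set bits, and
    # Kernighan's trick clears one per pass: 0b1101 -> 0b1100 -> 0b1000 -> 0.
    print(lowercase(13))  # 3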
| 65 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCAmelCase ( self ):
__a = ort.SessionOptions()
__a = False
return options
def __UpperCAmelCase ( self ):
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__a = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
__a = '''A red cat sitting on a park bench'''
__a = np.random.RandomState(0 )
__a = pipe(
prompt=_a , image=_a , mask_image=_a , guidance_scale=7.5 , num_inference_steps=10 , generator=_a , output_type='''np''' , )
__a = output.images
__a = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__a = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __UpperCAmelCase ( self ):
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__a = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__a = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_a , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
__a = '''A red cat sitting on a park bench'''
__a = np.random.RandomState(0 )
__a = pipe(
prompt=_a , image=_a , mask_image=_a , guidance_scale=7.5 , num_inference_steps=20 , generator=_a , output_type='''np''' , )
__a = output.images
__a = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__a = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 65 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ) -> str:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Tuple:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict ) -> Optional[Any]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , split=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = text_path
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = [text_path]
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]=("train",) ) -> Optional[Any]:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
for split in splits:
__a = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ) -> Union[str, Any]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader({'''train''': text_path} , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> str:
__a = tmp_path / '''cache'''
    # text files are read into a single "string" column by default
__a = {'''text''': '''string'''}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader({'''train''': text_path} , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ) -> Dict:
if split:
__a = {split: text_path}
else:
__a = '''train'''
__a = {'''train''': text_path, '''test''': text_path}
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 65 | 1 |
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation="relu"))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
    training_set = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(6_4, 6_4), batch_size=3_2, class_mode="binary"
)
    test_set = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(6_4, 6_4), batch_size=3_2, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
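    # Note: `fit_generator` is deprecated in recent TF/Keras releases;
    # `classifier.fit(...)` accepts these generators directly as a drop-in.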
classifier.save("cnn.h5")
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(6_4, 6_4)
)
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
# training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 65 |
"""simple docstring"""
def lowercase ( numa : int , numaa : int ) -> bool:
    # Two integers have opposite signs iff their XOR is negative (sign bit set).
    return numa ^ numaa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
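    # Quick check (illustrative): operands of opposite signs XOR to a negative
    # number, so lowercase(-5, 3) is True while lowercase(5, 3) is False.
    print(lowercase(-5, 3), lowercase(5, 3))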
| 65 | 1 |
"""simple docstring"""
import itertools
import math
def is_prime ( number : int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator ( ):
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution ( nth : int = 10001 ) -> int:
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
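    # Sanity check (illustrative): the generator yields primes in order.
    print(list(itertools.islice(prime_generator(), 5)))  # [2, 3, 5, 7, 11]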
| 65 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SpeechTaTokenizer
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Dict = True
def __UpperCAmelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__a = SpeechTaTokenizer(_a )
__a = AddedToken('''<mask>''' , lstrip=_a , rstrip=_a )
__a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , _a ):
__a = '''this is a test'''
__a = '''this is a test'''
return input_text, output_text
def __UpperCAmelCase ( self , _a , _a=False , _a=20 , _a=5 ):
__a , __a = self.get_input_output_texts(_a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = tokenizer.decode(_a , clean_up_tokenization_spaces=_a )
return text, ids
def __UpperCAmelCase ( self ):
__a = '''<pad>'''
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __UpperCAmelCase ( self ):
__a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(_a ) , 81 )
def __UpperCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __UpperCAmelCase ( self ):
__a = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__a = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
__a = tokenizer.add_tokens(_a )
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size + len(_a ) )
__a = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__a = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
__a = tokenizer.add_special_tokens(_a )
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size_a + len(_a ) )
__a = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a = self.get_tokenizer()
__a = tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(_a , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
__a = tokenizer.convert_tokens_to_ids(_a )
# fmt: off
self.assertListEqual(_a , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
__a = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def __UpperCAmelCase ( self ):
# Use custom sequence because this tokenizer does not handle numbers.
__a = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
__a = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=_a , )
| 65 | 1 |
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(1_0)}
def digits_fifth_powers_sum ( number : int ) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution ( ) -> int:
    return sum(
        number
        for number in range(1000 , 1000000 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
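    # Worked example (illustrative): 4150 == 4**5 + 1**5 + 5**5 + 0**5, so the
    # digit-fifth-power sum maps it to itself.
    print(digits_fifth_powers_sum(4150))  # 4150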
| 65 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = ['input_features', 'attention_mask']
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32_768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform):
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x
    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
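A short usage sketch for the extractor above (synthetic audio; this relies on the module's own imports, and the class name is the one restored above):

import numpy as np

# One second of fake 16 kHz audio yields roughly 98 frames of 80 mel features
# with the default 25 ms window / 10 ms hop.
extractor = MCTCTFeatureExtractor()
audio = np.random.randn(16_000).astype(np.float32)
out = extractor(audio, sampling_rate=16_000, padding=True)
print(out["input_features"][0].shape)  # (num_frames, 80)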
| 65 | 1 |
"""simple docstring"""
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
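Two illustrative calls for the predicate above: case is ignored, and any repeated letter disqualifies the string.

assert is_isogram("Uncopyrightable") is True
assert is_isogram("allowance") is False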
| 65 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
assert hasattr(self , '''env''' )
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(1,)] )
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 65 | 1 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1_000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def __UpperCAmelCase ( self ):
# fmt: off
__a = {'''input_ids''': [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
    def test_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2_047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 65 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    '''simple docstring'''
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
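A hedged check that the deprecation warning actually fires (illustrative only; it depends on the surrounding package, and Trainer.__init__ may raise later for unrelated reasons):

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    try:
        SageMakerTrainer(args=None)
    except Exception:
        pass  # the warning is emitted before Trainer.__init__ can fail
assert any("deprecated" in str(w.message) for w in caught)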
| 65 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
'''simple docstring'''
    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input):
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
'''simple docstring'''
    def __init__(self, pool_scale, in_channels, channels):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
'''simple docstring'''
    def __init__(self, pool_scales, in_channels, channels, align_corners):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x):
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
'''simple docstring'''
    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output
    def forward(self, encoder_hidden_states):
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
'''simple docstring'''
    def __init__(self, config, in_index=2, kernel_size=3, dilation=1):
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
    def forward(self, encoder_hidden_states):
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    '''simple docstring'''

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
lowercase_ = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowercase_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    '''simple docstring'''
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
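To make the pyramid-pooling step of the decode head concrete, here is a self-contained sketch with illustrative sizes (the channel counts and scales are assumptions, not values from a real UperNet config):

import torch
import torch.nn as nn

x = torch.randn(1, 512, 32, 32)           # pretend backbone feature map
pool_scales, channels = (1, 2, 3, 6), 128
ppm_outs = [x]
for scale in pool_scales:
    block = nn.Sequential(
        nn.AdaptiveAvgPool2d(scale),              # pool to (scale, scale)
        nn.Conv2d(512, channels, kernel_size=1),  # 1x1 channel projection
    )
    pooled = block(x)
    # upsample each pooled map back to the input resolution before fusing
    ppm_outs.append(
        nn.functional.interpolate(pooled, size=x.shape[2:], mode="bilinear", align_corners=False)
    )
fused = torch.cat(ppm_outs, dim=1)        # (1, 512 + 4 * 128, 32, 32)
print(fused.shape)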
| 65 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase_ = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
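The `_LazyModule` indirection above delays the heavy framework imports until an exported name is first accessed. A minimal standard-library sketch of the same idea (illustrative, not the actual transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    """Import a submodule only when one of its exported names is accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the module that defines it
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(self._name_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value

# e.g. LazyModule("mypkg", {"json": ["dumps"]}).dumps imports json lazily.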
| 65 | 1 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
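For reference, an example invocation of the launcher above (script names are illustrative):

# Assuming the file is saved as xla_spawn.py:
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# Everything after the training script is forwarded untouched, and main()
# appends the extra `--tpu_num_cores` flag before spawning the processes.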
| 65 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4_096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 65 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65 |
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
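Two worked calls for the allocator above; the last partition always absorbs the remainder:

print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']
print(allocation_num(10, 3))   # ['1-3', '4-6', '7-10']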
| 65 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    '''simple docstring'''

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
except OSError:
warnings.warn(
f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
                '''file to suppress this warning: ''',
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
# For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
__a = kwargs.pop('''raw_speech''' )
else:
__a = kwargs.pop('''audio''' , _a )
__a = kwargs.pop('''sampling_rate''' , _a )
__a = kwargs.pop('''text''' , _a )
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
if text is None:
return inputs
elif audio is None:
return encodings
else:
            inputs["labels"] = encodings["input_ids"]
return inputs
    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
            input_features["labels"] = labels["input_ids"]
return input_features
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@contextmanager
    def as_target_processor(self):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
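A hedged usage sketch of the processor above (the checkpoint name is illustrative and assumes a standard Wav2Vec2 CTC checkpoint that ships both feature-extractor and tokenizer files):

import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
audio = np.random.randn(16_000).astype(np.float32)  # one second of fake audio
inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="pt")
labels = processor(text="HELLO WORLD").input_ids     # tokenized transcript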
| 65 |
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, xa: float) -> list:
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
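A worked call for the routine above: on samples of the line y = 2x, Neville's scheme reproduces the line exactly.

value, table = neville_interpolate([1, 2, 3, 4, 5], [2, 4, 6, 8, 10], 5.5)
print(value)  # 11.0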
| 65 | 1 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]
    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)
    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)
    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"
# Save pytorch-model
print(f'''Save TensorFlow model to {tf_dump_path}''' )
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
print('''-''' * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
                model_type = model_shortcut_name
elif only_convert_finetuned_models:
print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(lowerCAmelCase__ )}: {model_shortcut_name} - model_type {model_type}''' )
print('''-''' * 100 )
            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name
            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
lowercase_ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
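# A minimal usage sketch for this converter (the script filename, model type, and
# paths below are assumptions, not taken from this file):
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path bert-base-uncased \
#       --tf_dump_path ./tf_dumps \
#       --compare_with_pt_model
# Leaving --model_type unset converts every entry in MODEL_CLASSES instead.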
| 65 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
lowercase_ = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
lowercase_ = {
"allenai/longformer-base-4096": 4_0_9_6,
"allenai/longformer-large-4096": 4_0_9_6,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_0_9_6,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_0_9_6,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase ( ) -> Union[str, Any]:
__a = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
__a = bs[:]
__a = 0
for b in range(2**8 ):
if b not in bs:
bs.append(b )
cs.append(2**8 + n )
n += 1
__a = [chr(n ) for n in cs]
return dict(zip(lowerCAmelCase__ , lowerCAmelCase__ ) )
def lowercase ( lowerCAmelCase__ : Tuple ) -> Tuple:
__a = set()
__a = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__a = char
return pairs
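# A quick sketch of what get_pairs produces; the example tuple is hypothetical,
# but the output follows directly from the loop above:
#   get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}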
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , _a , _a , _a="replace" , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=False , **_a , ):
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
# The mask token behaves like a normal word, i.e. it includes the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
errors=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , **_a , )
with open(_a , encoding='''utf-8''' ) as vocab_handle:
__a = json.load(_a )
__a = {v: k for k, v in self.encoder.items()}
__a = errors # how to handle errors in decoding
__a = bytes_to_unicode()
__a = {v: k for k, v in self.byte_encoder.items()}
with open(_a , encoding='''utf-8''' ) as merges_handle:
__a = merges_handle.read().split('''\n''' )[1:-1]
__a = [tuple(merge.split() ) for merge in bpe_merges]
__a = dict(zip(_a , range(len(_a ) ) ) )
__a = {}
__a = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__a = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def __UpperCAmelCase ( self ):
return len(self.encoder )
def __UpperCAmelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCAmelCase ( self , _a ):
if token in self.cache:
return self.cache[token]
__a = tuple(_a )
__a = get_pairs(_a )
if not pairs:
return token
while True:
__a = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__a , __a = bigram
__a = []
__a = 0
while i < len(_a ):
try:
__a = word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__a = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__a = tuple(_a )
__a = new_word
if len(_a ) == 1:
break
else:
__a = get_pairs(_a )
__a = ''' '''.join(_a )
__a = word
return word
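# Sketch of a single merge pass with a hypothetical rank table: if bpe_ranks
# were {("l", "l"): 0}, the word ("h", "e", "l", "l", "o") would become
# ("h", "e", "ll", "o"), after which no remaining pair has a rank and the
# loop exits through the `bigram not in self.bpe_ranks` branch.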
def __UpperCAmelCase ( self , _a ):
__a = []
for token in re.findall(self.pat , _a ):
__a = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(''' ''' ) )
return bpe_tokens
def __UpperCAmelCase ( self , _a ):
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def __UpperCAmelCase ( self , _a ):
return self.decoder.get(_a )
def __UpperCAmelCase ( self , _a ):
__a = ''''''.join(_a )
__a = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __UpperCAmelCase ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_a , ensure_ascii=_a ) + '''\n''' )
__a = 0
with open(_a , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _a : _a[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
__a = token_index
writer.write(''' '''.join(_a ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
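# Resulting layouts, per the concatenation above (RoBERTa-style):
#   single sequence: <s> A </s>
#   sequence pair:   <s> A </s> </s> B </s>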
def __UpperCAmelCase ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a=False , **_a ):
__a = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
__a = ''' ''' + text
return (text, kwargs)
| 65 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
__UpperCAmelCase : ClassVar[Features] = Features({'text': Value('string' )} )
__UpperCAmelCase : ClassVar[Features] = Features({'labels': ClassLabel} )
__UpperCAmelCase : str = "text"
__UpperCAmelCase : str = "labels"
def __UpperCAmelCase ( self , _a ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , _a ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
__a = copy.deepcopy(self )
__a = self.label_schema.copy()
__a = features[self.label_column]
__a = label_schema
return task_template
@property
def __UpperCAmelCase ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
| 65 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = 'lxmert'
__UpperCAmelCase : str = {}
def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=9_500 , _a=1_600 , _a=400 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1E-12 , _a=9 , _a=5 , _a=5 , _a=2_048 , _a=4 , _a=6.67 , _a=True , _a=True , _a=True , _a=True , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = hidden_size
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = num_qa_labels
__a = num_object_labels
__a = num_attr_labels
__a = l_layers
__a = x_layers
__a = r_layers
__a = visual_feat_dim
__a = visual_pos_dim
__a = visual_loss_normalizer
__a = task_matched
__a = task_mask_lm
__a = task_obj_predict
__a = task_qa
__a = visual_obj_loss
__a = visual_attr_loss
__a = visual_feat_loss
__a = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**_a )
| 65 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : list , lowerCAmelCase__ : int ) -> str:
# Base case: a collection (or prefix) of at most one element is already sorted
if len(lowerCAmelCase__ ) <= 1 or n <= 1:
return
insert_next(lowerCAmelCase__ , n - 1 )
rec_insertion_sort(lowerCAmelCase__ , n - 1 )
def lowercase ( lowerCAmelCase__ : list , lowerCAmelCase__ : int ) -> Optional[Any]:
# Checks order between adjacent elements
if index >= len(lowerCAmelCase__ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
__a , __a = (
collection[index],
collection[index - 1],
)
insert_next(lowerCAmelCase__ , index + 1 )
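# A small worked example of the intended in-place behavior:
#   rec_insertion_sort([3, 1, 2], 3) leaves the list as [1, 2, 3]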
if __name__ == "__main__":
lowercase_ = input("Enter integers separated by spaces: ")
lowercase_ = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 65 |
"""simple docstring"""
import itertools
import math
def lowercase ( lowerCAmelCase__ : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase ( ) -> int:
__a = 2
while True:
if is_prime(lowerCAmelCase__ ):
yield num
num += 1
def lowercase ( lowerCAmelCase__ : int = 10001 ) -> int:
return next(itertools.islice(prime_generator() , lowerCAmelCase__ - 1 , lowerCAmelCase__ ) )
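# Sanity check on a small input: solution(6) advances the generator past
# 2, 3, 5, 7, 11 and returns 13, the sixth prime.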
if __name__ == "__main__":
print(F'''{solution() = }''')
| 65 | 1 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowercase_ = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowercase_ = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowercase_ = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __UpperCAmelCase ( self , _a , _a , _a=None , _a=1 , _a="binary" , _a=None ):
__a = fa_score(
_a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a )
return {"f1": float(_a ) if score.size == 1 else score}
| 65 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
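# A sketch of the lazy-import effect this sets up (the package path is an
# assumption; attribute names come from the import structure above):
#   from transformers.models import mask2former   # cheap: torch/vision not imported yet
#   mask2former.Mask2FormerModel                  # resolved by _LazyModule on first access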
| 65 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
lowercase_ = {
"albert-base-v1": 5_1_2,
"albert-large-v1": 5_1_2,
"albert-xlarge-v1": 5_1_2,
"albert-xxlarge-v1": 5_1_2,
"albert-base-v2": 5_1_2,
"albert-large-v2": 5_1_2,
"albert-xlarge-v2": 5_1_2,
"albert-xxlarge-v2": 5_1_2,
}
lowercase_ = "▁"
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : int = AlbertTokenizer
def __init__( self , _a=None , _a=None , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ):
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text; there should be a match in a non-normalized sentence.
__a = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = False if not self.vocab_file else True
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
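# Per the code above, token type ids simply mark the two segments:
#   [CLS] A [SEP]          -> all 0s
#   [CLS] A [SEP] B [SEP]  -> 0s over the first segment, 1s over the second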
def __UpperCAmelCase ( self , _a , _a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 65 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowercase_ = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowercase_ = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowercase_ = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __UpperCAmelCase ( self , _a , _a , _a=None , _a=1 , _a="binary" , _a=None ):
__a = fa_score(
_a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a )
return {"f1": float(_a ) if score.size == 1 else score}
| 65 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ['image_processor', 'tokenizer']
__UpperCAmelCase : int = 'BlipImageProcessor'
__UpperCAmelCase : Dict = 'AutoTokenizer'
def __init__( self , _a , _a , _a ):
super().__init__(_a , _a )
# add QFormer tokenizer
__a = qformer_tokenizer
def __call__( self , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
__a = BatchFeature()
if text is not None:
__a = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
encoding.update(_a )
__a = self.qformer_tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
__a = qformer_text_encoding.pop('''input_ids''' )
__a = qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
__a = self.image_processor(_a , return_tensors=_a )
encoding.update(_a )
return encoding
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __UpperCAmelCase ( self ):
__a = self.tokenizer.model_input_names
__a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def __UpperCAmelCase ( self , _a , **_a ):
if os.path.isfile(_a ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(_a , exist_ok=_a )
__a = os.path.join(_a , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(_a )
return super().save_pretrained(_a , **_a )
@classmethod
def __UpperCAmelCase ( cls , _a , **_a ):
__a = AutoTokenizer.from_pretrained(_a , subfolder='''qformer_tokenizer''' )
__a = cls._get_arguments_from_pretrained(_a , **_a )
args.append(_a )
return cls(*_a )
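# A minimal usage sketch (the processor class is shown obfuscated above; the
# checkpoint name and prompt are assumptions):
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is in this picture?", return_tensors="pt")
#   # `inputs` then carries pixel_values, input_ids / attention_mask, and the
#   # qformer_input_ids / qformer_attention_mask produced in __call__ above.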
| 65 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = ['onnx']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls , *_a , **_a ):
requires_backends(cls , ['''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls , *_a , **_a ):
requires_backends(cls , ['''onnx'''] )
| 65 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase_ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__UpperCAmelCase : Any = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__UpperCAmelCase : Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__UpperCAmelCase : Dict = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = ZeroShotClassificationPipeline(
model=_a , tokenizer=_a , candidate_labels=['''politics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __UpperCAmelCase ( self , _a , _a ):
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
# No kwarg
__a = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__a = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
# https://github.com/huggingface/transformers/issues/13846
__a = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_a , [
{'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]}
for i in range(1 )
] , )
__a = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_a , [
{'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]}
for i in range(2 )
] , )
with self.assertRaises(_a ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(_a ):
classifier(_a , candidate_labels='''politics''' )
with self.assertRaises(_a ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(_a ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=_a )
with self.assertRaises(_a ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(_a ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=_a , )
self.run_entailment_id(_a )
def __UpperCAmelCase ( self , _a ):
__a = zero_shot_classifier.model.config
__a = config.labelaid
__a = zero_shot_classifier.entailment_id
__a = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
__a = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__a = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__a = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
__a = original_labelaid
self.assertEqual(_a , zero_shot_classifier.entailment_id )
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__a = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_a , )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def __UpperCAmelCase ( self ):
__a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__a = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_a , )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
| 65 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowercase_ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=16 , _a=13 , _a=7 , _a=14 , _a=10 , _a=19 , _a=5 , _a=4 , _a=True , _a=16 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=[1, 2, 3, 4, 5] , _a=25 , _a=5 , ):
__a = d_model
__a = parent
__a = batch_size
__a = prediction_length
__a = context_length
__a = cardinality
__a = num_time_features
__a = lags_sequence
__a = embedding_dimension
__a = is_training
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = context_length
__a = prediction_length + label_length
__a = label_length
__a = moving_average
__a = autocorrelation_factor
def __UpperCAmelCase ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __UpperCAmelCase ( self , _a ):
__a = config.context_length + max(config.lags_sequence )
__a = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__a = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__a = floats_tensor([self.batch_size, _past_length] )
__a = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__a = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__a = floats_tensor([self.batch_size, config.prediction_length] )
__a = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def __UpperCAmelCase ( self ):
__a = self.get_config()
__a = self.prepare_autoformer_inputs_dict(_a )
return config, inputs_dict
def __UpperCAmelCase ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self , _a , _a ):
__a = AutoformerModel(config=_a ).to(_a ).eval()
__a = model(**_a )
__a = outputs.encoder_last_hidden_state
__a = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__a = model.get_encoder()
encoder.save_pretrained(_a )
__a = AutoformerEncoder.from_pretrained(_a ).to(_a )
__a , __a , __a , __a , __a = model.create_network_inputs(**_a )
__a , __a = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__a = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__a = encoder(inputs_embeds=_a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__a = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__a = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__a = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__a = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__a = model.get_decoder()
decoder.save_pretrained(_a )
__a = AutoformerDecoder.from_pretrained(_a ).to(_a )
__a = decoder(
trend=_a , inputs_embeds=_a , encoder_hidden_states=_a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__UpperCAmelCase : int = (AutoformerForPrediction,) if is_torch_available() else ()
__UpperCAmelCase : Any = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
__UpperCAmelCase : int = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : int = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : int = False
def __UpperCAmelCase ( self ):
__a = AutoformerModelTester(self )
__a = ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__a = model_class(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a )
__a , __a = model_class.from_pretrained(_a , output_loading_info=_a )
self.assertEqual(info['''missing_keys'''] , [] )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_a )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a = inspect.signature(getattr(_a , '''forward''' ) )
# The main input is the name of the argument after `self`
__a = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , _a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(_a )] , _a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
__a = getattr(self.model_tester , '''seq_length''' , _a )
__a = getattr(self.model_tester , '''decoder_seq_length''' , _a )
__a = getattr(self.model_tester , '''encoder_seq_length''' , _a )
__a = getattr(self.model_tester , '''d_model''' , _a )
__a = getattr(self.model_tester , '''num_attention_heads''' , _a )
__a = d_model // num_attention_heads
for model_class in self.all_model_classes:
__a = True
__a = False
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.encoder_attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__a = len(_a )
__a = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(_a , _a )
# decoder attentions
__a = outputs.decoder_attentions
self.assertIsInstance(_a , (list, tuple) )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__a = outputs.cross_attentions
self.assertIsInstance(_a , (list, tuple) )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__a = True
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + 2 , len(_a ) )
__a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __UpperCAmelCase ( self ):
super().test_retain_grad_hidden_states_attentions()
def lowercase ( lowerCAmelCase__ : Optional[int]="train-batch.pt" ) -> List[str]:
__a = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=lowerCAmelCase__ , repo_type='''dataset''' )
__a = torch.load(lowerCAmelCase__ , map_location=lowerCAmelCase__ )
return batch
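# Hedged sketch (standalone, hypothetical sizes): the model below emits
# `num_parallel_samples` sample paths per series; the integration test turns
# them into a point forecast by averaging over the sample dimension.
def _demo_point_forecast() -> None:
    samples = torch.ones(64, 100, 24)  # (batch, num_parallel_samples, prediction_length), assumed shape
    assert samples.mean(dim=1).shape == (64, 24)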
@require_torch
@slow
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_a )
__a = prepare_batch()
with torch.no_grad():
__a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
__a = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=_a )
self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) )
def __UpperCAmelCase ( self ):
__a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_a )
__a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
__a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
__a = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=_a )
self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) )
def __UpperCAmelCase ( self ):
__a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_a )
__a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
__a = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
__a = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , _a )
__a = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=_a )
__a = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _a , rtol=1E-1 ) )
| 65 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowercase_ = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowercase ( lowerCAmelCase__ : List[Any] ) -> str:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : int ) -> Union[str, Any]:
from transformers.testing_utils import pytest_terminal_summary_main
__a = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(lowerCAmelCase__ , id=lowerCAmelCase__ )
| 65 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] ) -> Any:
if isinstance(lowerCAmelCase__ , torch.Tensor ):
return image
elif isinstance(lowerCAmelCase__ , PIL.Image.Image ):
__a = [image]
if isinstance(image[0] , PIL.Image.Image ):
__a = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__a = np.concatenate(lowerCAmelCase__ , axis=0 )
__a = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_55.0
__a = image.transpose(0 , 3 , 1 , 2 )
__a = 2.0 * image - 1.0
__a = torch.from_numpy(lowerCAmelCase__ )
elif isinstance(image[0] , torch.Tensor ):
__a = torch.cat(lowerCAmelCase__ , dim=0 )
return image
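# Hedged check (standalone): the preprocessing above rescales uint8 pixels from
# [0, 255] to floats in [-1, 1] and moves channels into NCHW layout.
def _demo_pixel_range() -> None:
    pixels = np.array([0.0, 127.5, 255.0], dtype=np.float32) / 255.0
    scaled = 2.0 * pixels - 1.0
    assert scaled.min() == -1.0 and scaled.max() == 1.0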
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]=0.99_95 ) -> int:
if not isinstance(lowerCAmelCase__ , np.ndarray ):
__a = True
__a = va.device
__a = va.cpu().numpy()
__a = va.cpu().numpy()
__a = np.sum(va * va / (np.linalg.norm(lowerCAmelCase__ ) * np.linalg.norm(lowerCAmelCase__ )) )
if np.abs(lowerCAmelCase__ ) > DOT_THRESHOLD:
__a = (1 - t) * va + t * va
else:
__a = np.arccos(lowerCAmelCase__ )
__a = np.sin(lowerCAmelCase__ )
__a = theta_a * t
__a = np.sin(lowerCAmelCase__ )
__a = np.sin(theta_a - theta_t ) / sin_theta_a
__a = sin_theta_t / sin_theta_a
__a = sa * va + sa * va
if inputs_are_torch:
__a = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
return va
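# Hedged worked example (standalone math, not a call into the function above):
# at t = 0.5 between orthogonal unit vectors, slerp yields the 45-degree unit
# vector, whereas a plain lerp would shrink the norm to about 0.707.
def _demo_slerp_midpoint() -> None:
    theta = np.arccos(0.0)  # angle between orthogonal unit vectors
    s = np.sin(0.5 * theta) / np.sin(theta)
    mid = s * np.array([1.0, 0.0]) + s * np.array([0.0, 1.0])
    assert np.allclose(np.linalg.norm(mid), 1.0)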
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] ) -> int:
__a = F.normalize(lowerCAmelCase__ , dim=-1 )
__a = F.normalize(lowerCAmelCase__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
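# Hedged check (standalone): for unit vectors, 2 * arcsin(|x - y| / 2)**2 is the
# squared great-circle distance; orthogonal unit vectors give 2 * (pi/4)**2.
def _demo_spherical_dist() -> None:
    x = torch.tensor([[1.0, 0.0]])
    y = torch.tensor([[0.0, 1.0]])
    d = (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
    assert torch.allclose(d, torch.tensor([3.14159 ** 2 / 8]), atol=1e-3)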
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ) -> List[str]:
for param in model.parameters():
__a = value
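# Hedged demo (standalone): setting requires_grad=False on every parameter
# freezes a module; this is how the pipeline keeps the text encoder and CLIP
# model fixed during guidance.
def _demo_freeze() -> None:
    layer = torch.nn.Linear(2, 2)
    for param in layer.parameters():
        param.requires_grad = False
    assert not any(p.requires_grad for p in layer.parameters())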
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a=None , _a=None , _a=None , ):
super().__init__()
self.register_modules(
vae=_a , text_encoder=_a , clip_model=_a , tokenizer=_a , unet=_a , scheduler=_a , feature_extractor=_a , coca_model=_a , coca_tokenizer=_a , coca_transform=_a , )
__a = (
feature_extractor.size
if isinstance(feature_extractor.size , _a )
else feature_extractor.size['''shortest_edge''']
)
__a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _a )
set_requires_grad(self.clip_model , _a )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.vae , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.vae , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.unet , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.unet , _a )
def __UpperCAmelCase ( self , _a , _a , _a ):
# get the original timestep using init_timestep
__a = min(int(num_inference_steps * strength ) , _a )
__a = max(num_inference_steps - init_timestep , 0 )
__a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a=None ):
if not isinstance(_a , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(_a )}''' )
__a = image.to(device=_a , dtype=_a )
if isinstance(_a , _a ):
__a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_a )
]
__a = torch.cat(_a , dim=0 )
else:
__a = self.vae.encode(_a ).latent_dist.sample(_a )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 0.1_8215 * init_latents
__a = init_latents.repeat_interleave(_a , dim=0 )
__a = randn_tensor(init_latents.shape , generator=_a , device=_a , dtype=_a )
# get latents
__a = self.scheduler.add_noise(_a , _a , _a )
__a = init_latents
return latents
def __UpperCAmelCase ( self , _a ):
__a = self.coca_transform(_a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def __UpperCAmelCase ( self , _a , _a ):
__a = self.feature_extractor.preprocess(_a )
__a = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
__a = self.clip_model.get_image_features(_a )
__a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
__a = image_embeddings_clip.repeat_interleave(_a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , ):
__a = latents.detach().requires_grad_()
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a , encoder_hidden_states=_a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__a = self.scheduler.alphas_cumprod[timestep]
__a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__a = torch.sqrt(_a )
__a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _a ):
__a = self.scheduler.sigmas[index]
__a = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 1 / 0.1_8215 * sample
__a = self.vae.decode(_a ).sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = transforms.Resize(self.feature_extractor_size )(_a )
__a = self.normalize(_a ).to(latents.dtype )
__a = self.clip_model.get_image_features(_a )
__a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
__a = spherical_dist_loss(_a , _a ).mean() * clip_guidance_scale
__a = -torch.autograd.grad(_a , _a )[0]
if isinstance(self.scheduler , _a ):
__a = latents.detach() + grads * (sigma**2)
__a = noise_pred_original
else:
__a = noise_pred_original - torch.sqrt(_a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , _a , _a , _a = None , _a = None , _a = 512 , _a = 512 , _a = 0.6 , _a = 50 , _a = 7.5 , _a = 1 , _a = 0.0 , _a = 100 , _a = None , _a = "pil" , _a = True , _a = 0.8 , _a = 0.1 , _a = 0.1 , ):
if isinstance(_a , _a ) and len(_a ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(_a )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(_a , torch.Generator ) and batch_size > 1:
__a = [generator] + [None] * (batch_size - 1)
__a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
__a = [x[0] for x in coca_is_none if x[1]]
__a = ''', '''.join(_a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_a ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__a = self.get_image_description(_a )
if style_prompt is None:
if len(_a ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__a = self.get_image_description(_a )
# get prompt text embeddings for content and style
__a = self.tokenizer(
_a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
__a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__a = self.tokenizer(
_a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
__a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__a = slerp(_a , _a , _a )
# duplicate text embeddings for each generation per prompt
__a = text_embeddings.repeat_interleave(_a , dim=0 )
# set timesteps
__a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__a = {}
if accepts_offset:
__a = 1
self.scheduler.set_timesteps(_a , **_a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__a , __a = self.get_timesteps(_a , _a , self.device )
__a = timesteps[:1].repeat(_a )
# Preprocess image
__a = preprocess(_a , _a , _a )
__a = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
__a = preprocess(_a , _a , _a )
__a = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
__a = slerp(_a , _a , _a )
if clip_guidance_scale > 0:
__a = self.get_clip_image_embeddings(_a , _a )
__a = self.get_clip_image_embeddings(_a , _a )
__a = slerp(
_a , _a , _a )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a = content_text_input.input_ids.shape[-1]
__a = self.tokenizer([''''''] , padding='''max_length''' , max_length=_a , return_tensors='''pt''' )
__a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__a = uncond_embeddings.repeat_interleave(_a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__a = torch.randn(_a , generator=_a , device='''cpu''' , dtype=_a ).to(
self.device )
else:
__a = torch.randn(_a , generator=_a , device=self.device , dtype=_a )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
# check if the scheduler accepts generator
__a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__a = generator
with self.progress_bar(total=_a ):
for i, t in enumerate(_a ):
# expand the latents if we are doing classifier free guidance
__a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a , encoder_hidden_states=_a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__a , __a = noise_pred.chunk(2 )
__a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__a , __a = self.cond_fn(
_a , _a , _a , _a , _a , _a , _a , )
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 1 / 0.1_8215 * latents
__a = self.vae.decode(_a ).sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a )
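# Hedged sketch (standalone): classifier-free guidance combines the two noise
# predictions as eps_uncond + guidance_scale * (eps_text - eps_uncond), the
# same update performed inside the denoising loop above.
def _demo_cfg() -> None:
    eps_uncond = torch.zeros(4)
    eps_text = torch.ones(4)
    guided = eps_uncond + 7.5 * (eps_text - eps_uncond)
    assert torch.allclose(guided, torch.full((4,), 7.5))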
| 65 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def lowercase ( ) -> int:
__a = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
__a = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCAmelCase__ )
# Let's go
__a = parser.parse_args()
if not hasattr(lowerCAmelCase__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
__a = args.func(lowerCAmelCase__ )
service.run()
if __name__ == "__main__":
main()
| 65 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowercase_ = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowercase ( lowerCAmelCase__ : List[Any] ) -> str:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : int ) -> Union[str, Any]:
from transformers.testing_utils import pytest_terminal_summary_main
__a = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(lowerCAmelCase__ , id=lowerCAmelCase__ )
| 65 | 1 |
"""simple docstring"""
# using dfs for finding eulerian path traversal
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : int=None ) -> List[Any]:
__a = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
__a , __a = True, True
__a = dfs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return path
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any ) -> Tuple:
__a = 0
__a = -1
for i in range(lowerCAmelCase__ ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
__a = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
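# Hedged worked example (standalone): an undirected graph has an Euler circuit
# iff no vertex has odd degree, and an Euler path iff exactly two do (the
# parity count computed above).
def _demo_degree_parity() -> None:
    graph = {1: [2, 3], 2: [1, 3], 3: [1, 2]}  # triangle: every degree is 2
    odd = sum(len(neighbors) % 2 for neighbors in graph.values())
    assert odd == 0  # even degrees everywhere -> Euler circuit exists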
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ) -> Optional[int]:
__a = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
__a , __a = check_circuit_or_path(lowerCAmelCase__ , lowerCAmelCase__ )
if check == 3:
print('''graph is not Eulerian''' )
print('''no path''' )
return
__a = 1
if check == 2:
__a = odd_node
print('''graph has a Euler path''' )
if check == 1:
print('''graph has a Euler cycle''' )
__a = dfs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
print(lowerCAmelCase__ )
def lowercase ( ) -> List[Any]:
__a = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
__a = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
__a = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
__a = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
__a = {
1: [],
2: []
# all degree is zero
}
__a = 10
check_euler(lowerCAmelCase__ , lowerCAmelCase__ )
check_euler(lowerCAmelCase__ , lowerCAmelCase__ )
check_euler(lowerCAmelCase__ , lowerCAmelCase__ )
check_euler(lowerCAmelCase__ , lowerCAmelCase__ )
check_euler(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 65 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase_ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__UpperCAmelCase : Any = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__UpperCAmelCase : Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__UpperCAmelCase : Dict = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = ZeroShotClassificationPipeline(
model=_a , tokenizer=_a , candidate_labels=['''politics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __UpperCAmelCase ( self , _a , _a ):
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
# No kwarg
__a = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__a = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
# https://github.com/huggingface/transformers/issues/13846
__a = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_a , [
{'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]}
for i in range(1 )
] , )
__a = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_a , [
{'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]}
for i in range(2 )
] , )
with self.assertRaises(_a ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(_a ):
classifier(_a , candidate_labels='''politics''' )
with self.assertRaises(_a ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(_a ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=_a )
with self.assertRaises(_a ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(_a ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=_a , )
self.run_entailment_id(_a )
def __UpperCAmelCase ( self , _a ):
__a = zero_shot_classifier.model.config
__a = config.labelaid
__a = zero_shot_classifier.entailment_id
__a = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
__a = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__a = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__a = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
__a = original_labelaid
self.assertEqual(_a , zero_shot_classifier.entailment_id )
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__a = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_a , )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def __UpperCAmelCase ( self ):
__a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__a = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_a , )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
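# Hedged sketch (standalone, hypothetical template text): zero-shot
# classification casts each candidate label into an NLI hypothesis via the
# hypothesis template, then scores each sequence/hypothesis pair with an
# entailment model.
def _demo_hypotheses() -> list:
    template = "This example is about {}."  # assumed template, not the pipeline default
    return [template.format(label) for label in ["politics", "public health"]]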
| 65 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = KandinskyVaaControlnetPipeline
__UpperCAmelCase : List[str] = ['image_embeds', 'negative_image_embeds', 'hint']
__UpperCAmelCase : Dict = ['image_embeds', 'negative_image_embeds', 'hint']
__UpperCAmelCase : Optional[Any] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__UpperCAmelCase : str = False
@property
def __UpperCAmelCase ( self ):
return 32
@property
def __UpperCAmelCase ( self ):
return 32
@property
def __UpperCAmelCase ( self ):
return self.time_input_dim
@property
def __UpperCAmelCase ( self ):
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self ):
return 100
@property
def __UpperCAmelCase ( self ):
torch.manual_seed(0 )
__a = {
'''in_channels''': 8,
# Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__a = UNetaDConditionModel(**_a )
return model
@property
def __UpperCAmelCase ( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self ):
torch.manual_seed(0 )
__a = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCAmelCase ( self ):
__a = self.dummy_unet
__a = self.dummy_movq
__a = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
__a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCAmelCase ( self , _a , _a=0 ):
__a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
__a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
__a = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith('''mps''' ):
__a = torch.manual_seed(_a )
else:
__a = torch.Generator(device=_a ).manual_seed(_a )
__a = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __UpperCAmelCase ( self ):
__a = '''cpu'''
__a = self.get_dummy_components()
__a = self.pipeline_class(**_a )
__a = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
__a = pipe(**self.get_dummy_inputs(_a ) )
__a = output.images
__a = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a = np.array(
[0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
__a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
__a = torch.from_numpy(np.array(_a ) ).float() / 255.0
__a = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__a = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
__a = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
__a = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
__a = '''A robot, 4k photo'''
__a = torch.Generator(device='''cuda''' ).manual_seed(0 )
__a , __a = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__a = torch.Generator(device='''cuda''' ).manual_seed(0 )
__a = pipeline(
image_embeds=_a , negative_image_embeds=_a , hint=_a , generator=_a , num_inference_steps=100 , output_type='''np''' , )
__a = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a , _a )
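# Hedged sketch (standalone): a ControlNet hint is an RGB image rescaled to
# [0, 1] and laid out as (batch, channels, height, width), mirroring the
# permute/unsqueeze in the slow test above.
def _demo_hint_layout() -> None:
    rgb = torch.rand(64, 64, 3)  # H x W x C in [0, 1]
    hint = rgb.permute(2, 0, 1).unsqueeze(0)
    assert hint.shape == (1, 3, 64, 64)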
| 65 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase ( monkeypatch : Optional[int] ) -> int:
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def lowercase ( monkeypatch : Any ) -> Any:
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a ):
__a = metric_id
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : Any = [MetricMock(metric_id ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def __UpperCAmelCase ( self ):
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
'''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple ) -> Optional[int]:
if "tmp_path" in args:
__a = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(lowerCAmelCase__ , match='''https://huggingface.co/docs/evaluate''' ):
func(*lowerCAmelCase__ )
| 65 | 1 |
"""simple docstring"""
def lowercase ( separator : str , separated : list[str] ) -> str:
    joined = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator
    return joined.strip(separator )
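# Hedged note (standalone): str.strip(separator) removes any characters from
# the separator set at both ends, not the exact suffix; that is harmless for a
# single-character separator, but a multi-character one can over-strip.
def _demo_strip_gotcha() -> None:
    assert "a, b, ".strip(", ") == "a, b"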
if __name__ == "__main__":
from doctest import testmod
testmod()
| 65 |
"""simple docstring"""
def lowercase ( number : int ) -> int:
    if not isinstance(number , int ) or number < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    count = 0
    while number:
        # number &= number - 1 clears the lowest set bit (Brian Kernighan's
        # trick), so the loop runs once per set bit rather than once per bit.
        number &= number - 1
        count += 1
    return count
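# Hedged worked example: 13 = 0b1101, and number &= number - 1 clears the
# lowest set bit on every pass (0b1101 -> 0b1100 -> 0b1000 -> 0b0), so the
# loop body runs exactly three times.
def _demo_kernighan() -> None:
    trace, number = [], 0b1101
    while number:
        number &= number - 1
        trace.append(bin(number))
    assert trace == ["0b1100", "0b1000", "0b0"]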
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "spiece.model"}
lowercase_ = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
lowercase_ = {
"albert-base-v1": 5_1_2,
"albert-large-v1": 5_1_2,
"albert-xlarge-v1": 5_1_2,
"albert-xxlarge-v1": 5_1_2,
"albert-base-v2": 5_1_2,
"albert-large-v2": 5_1_2,
"albert-xlarge-v2": 5_1_2,
"albert-xxlarge-v2": 5_1_2,
}
lowercase_ = "▁"
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _a , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__a = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def __UpperCAmelCase ( self ):
return len(self.sp_model )
def __UpperCAmelCase ( self ):
__a = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__a = self.__dict__.copy()
__a = None
return state
def __setstate__( self , _a ):
__a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self , _a ):
if self.remove_space:
__a = ''' '''.join(inputs.strip().split() )
else:
__a = inputs
__a = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
__a = unicodedata.normalize('''NFKD''' , _a )
__a = ''''''.join([c for c in outputs if not unicodedata.combining(_a )] )
if self.do_lower_case:
__a = outputs.lower()
return outputs
def __UpperCAmelCase ( self , _a ):
__a = self.preprocess_text(_a )
__a = self.sp_model.encode(_a , out_type=_a )
__a = []
for piece in pieces:
if len(_a ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__a = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__a = cur_pieces[1:]
else:
__a = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def __UpperCAmelCase ( self , _a ):
return self.sp_model.PieceToId(_a )
def __UpperCAmelCase ( self , _a ):
return self.sp_model.IdToPiece(_a )
def __UpperCAmelCase ( self , _a ):
__a = []
__a = ''''''
__a = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
__a = True
__a = []
else:
current_sub_tokens.append(_a )
__a = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1]
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
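# Hedged sketch (standalone): ALBERT-style sequence pairs are laid out as
# [CLS] A [SEP] B [SEP]; token_type_ids are 0 over the first segment
# (special tokens included) and 1 over the second, matching the method above.
def _demo_pair_layout() -> None:
    cls, sep = ["[CLS]"], ["[SEP]"]
    a, b = ["hello"], ["world", "!"]
    tokens = cls + a + sep + b + sep
    type_ids = [0] * len(cls + a + sep) + [1] * len(b + sep)
    assert len(tokens) == len(type_ids) == 6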
| 65 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ) -> str:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Tuple:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
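# Hedged illustration (standalone): the parametrized `features` dicts above are
# converted into a datasets.Features object exactly like this.
def _demo_features() -> None:
    features = Features({"text": Value("string")})
    assert features["text"].dtype == "string"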
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict ) -> Optional[Any]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , split=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = text_path
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = [text_path]
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]=("train",) ) -> Optional[Any]:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
for split in splits:
__a = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ) -> Union[str, Any]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader({'''train''': text_path} , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> str:
__a = tmp_path / '''cache'''
# The text reader yields a single "text" column whose default dtype is "string"; the parametrized features may override it
__a = {'''text''': '''string'''}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader({'''train''': text_path} , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ) -> Dict:
if split:
__a = {split: text_path}
else:
__a = '''train'''
__a = {'''train''': text_path, '''test''': text_path}
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 65 | 1 |
"""simple docstring"""
import requests
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> None:
__a = {'''Content-Type''': '''application/json'''}
__a = requests.post(lowerCAmelCase__ , json={'''text''': message_body} , headers=lowerCAmelCase__ )
if response.status_code != 200:
__a = (
'''Request to slack returned an error '''
f'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(lowerCAmelCase__ )
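# Hedged note (standalone): Slack incoming webhooks accept a JSON body whose
# minimal form is {"text": ...}, which is exactly the payload posted above.
def _demo_payload() -> dict:
    return {"text": "<YOUR MESSAGE BODY>"}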
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 65 |
"""simple docstring"""
def lowercase ( num_a : int , num_b : int ) -> bool:
    # Two integers have different signs iff their XOR is negative.
    return num_a ^ num_b < 0
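# Hedged worked example: the sign bit of (x ^ y) is set exactly when the sign
# bits of x and y differ, so a negative XOR means "different signs".
def _demo_signs() -> None:
    assert (3 ^ -5) < 0  # opposite signs
    assert not (3 ^ 5) < 0  # same sign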
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax ( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if not scores:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
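# Hedged note (standalone): a complete binary game tree with 2**h leaves has
# height h, so main() below derives the height from log2(len(scores)).
def _demo_tree_height() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    assert len(scores) == 2 ** 3  # so the tree height is 3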
def main ( ) -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print(f'''Optimal value : {minimax(0 , 0 , True , scores , height )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 65 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SpeechTaTokenizer
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Dict = True
def __UpperCAmelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__a = SpeechTaTokenizer(_a )
__a = AddedToken('''<mask>''' , lstrip=_a , rstrip=_a )
__a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , _a ):
__a = '''this is a test'''
__a = '''this is a test'''
return input_text, output_text
def __UpperCAmelCase ( self , _a , _a=False , _a=20 , _a=5 ):
__a , __a = self.get_input_output_texts(_a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = tokenizer.decode(_a , clean_up_tokenization_spaces=_a )
return text, ids
def __UpperCAmelCase ( self ):
__a = '''<pad>'''
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __UpperCAmelCase ( self ):
__a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(_a ) , 81 )
def __UpperCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __UpperCAmelCase ( self ):
__a = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__a = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
__a = tokenizer.add_tokens(_a )
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size + len(_a ) )
__a = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__a = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
__a = tokenizer.add_special_tokens(_a )
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size_a + len(_a ) )
__a = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a = self.get_tokenizer()
__a = tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(_a , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
__a = tokenizer.convert_tokens_to_ids(_a )
# fmt: off
self.assertListEqual(_a , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
__a = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def __UpperCAmelCase ( self ):
# Use custom sequence because this tokenizer does not handle numbers.
__a = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
__a = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=_a , )
| 65 | 1 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : int ) -> bool:
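# Inverting the pentagonal-number formula P(k) = k * (3k - 1) / 2:
# n is pentagonal iff k = (1 + sqrt(1 + 24n)) / 6 is a positive integer.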
__a = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase ( lowerCAmelCase__ : int = 5000 ) -> int:
__a = [(i * (3 * i - 1)) // 2 for i in range(1 , lowerCAmelCase__ )]
for i, pentagonal_i in enumerate(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ , len(lowerCAmelCase__ ) ):
__a = pentagonal_nums[j]
__a = pentagonal_i + pentagonal_j
__a = pentagonal_j - pentagonal_i
if is_pentagonal(lowerCAmelCase__ ) and is_pentagonal(lowerCAmelCase__ ):
return b
return -1
if __name__ == "__main__":
print(F'''{solution() = }''')
| 65 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = ['input_features', 'attention_mask']
def __init__( self , _a=80 , _a=16_000 , _a=0.0 , _a=10 , _a=25 , _a="hamming_window" , _a=3_2768.0 , _a=0.97 , _a=1.0 , _a=True , _a=True , _a=False , **_a , ):
super().__init__(feature_size=_a , sampling_rate=_a , padding_value=_a , **_a )
__a = feature_size
__a = sampling_rate
__a = padding_value
__a = hop_length
__a = win_length
__a = frame_signal_scale
__a = preemphasis_coeff
__a = mel_floor
__a = normalize_means
__a = normalize_vars
__a = win_function
__a = return_attention_mask
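# win_length and hop_length are given in milliseconds; convert them to sample counts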
__a = win_length * sampling_rate // 1_000
__a = hop_length * sampling_rate // 1_000
__a = optimal_fft_length(self.sample_size )
__a = (self.n_fft // 2) + 1
def __UpperCAmelCase ( self , _a ):
if self.win_function == "hamming_window":
__a = window_function(window_length=self.sample_size , name=self.win_function , periodic=_a )
else:
__a = window_function(window_length=self.sample_size , name=self.win_function )
__a = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__a = spectrogram(
one_waveform * self.frame_signal_scale , window=_a , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=_a , preemphasis=self.preemphasis_coeff , mel_filters=_a , mel_floor=self.mel_floor , log_mel='''log''' , )
return msfc_features.T
def __UpperCAmelCase ( self , _a , _a , _a ):
# make sure we normalize float32 arrays
if self.normalize_means:
__a = x[:input_length].mean(axis=0 )
__a = np.subtract(_a , _a )
if self.normalize_vars:
__a = x[:input_length].std(axis=0 )
__a = np.divide(_a , _a )
if input_length < x.shape[0]:
__a = padding_value
# make sure array is in float32
__a = x.astype(np.floataa )
return x
def __UpperCAmelCase ( self , _a , _a = None ):
__a = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(_a , _a , self.padding_value ) for x, n in zip(_a , _a )]
def __call__( self , _a , _a = False , _a = None , _a = False , _a = None , _a = None , _a = None , _a = None , **_a , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__a = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__a = is_batched_numpy or (
isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a = [np.asarray(_a , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a , np.ndarray ):
__a = np.asarray(_a , dtype=np.floataa )
elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a = [raw_speech]
# extract fbank features
__a = [self._extract_mfsc_features(_a ) for one_waveform in raw_speech]
# convert into correct format for padding
__a = BatchFeature({'''input_features''': features} )
__a = self.pad(
_a , padding=_a , max_length=_a , truncation=_a , pad_to_multiple_of=_a , return_attention_mask=_a , **_a , )
# make sure list is in array format
__a = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , _a ):
__a = [np.asarray(_a , dtype=np.floataa ) for feature in input_features]
__a = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__a = [np.asarray(_a , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__a = (
np.array(_a , dtype=np.intaa )
if self._get_padding_strategies(_a , max_length=_a ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__a = self.normalize(
padded_inputs['''input_features'''] , attention_mask=_a )
if return_tensors is not None:
__a = padded_inputs.convert_to_tensors(_a )
return padded_inputs
| 65 | 1 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : str="pt" ) -> Tuple:
__a = {'''add_prefix_space''': True} if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and not line.startswith(''' ''' ) else {}
__a = padding_side
return tokenizer(
[line] , max_length=lowerCAmelCase__ , padding='''max_length''' if pad_to_max_length else None , truncation=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int]=None , ) -> Dict:
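# Drop every column that consists solely of the padding token across the batch.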
__a = input_ids.ne(lowerCAmelCase__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a="train" , _a=None , _a=None , _a=None , _a="" , ):
super().__init__()
__a = Path(_a ).joinpath(type_path + '''.source''' )
__a = Path(_a ).joinpath(type_path + '''.target''' )
__a = self.get_char_lens(self.src_file )
__a = max_source_length
__a = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
__a = tokenizer
__a = prefix
if n_obs is not None:
__a = self.src_lens[:n_obs]
__a = src_lang
__a = tgt_lang
def __len__( self ):
return len(self.src_lens )
def __getitem__( self , _a ):
__a = index + 1 # linecache starts at 1
__a = self.prefix + linecache.getline(str(self.src_file ) , _a ).rstrip('''\n''' )
__a = linecache.getline(str(self.tgt_file ) , _a ).rstrip('''\n''' )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__a = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _a ) else self.tokenizer
)
__a = self.tokenizer.generator if isinstance(self.tokenizer , _a ) else self.tokenizer
__a = encode_line(_a , _a , self.max_source_length , '''right''' )
__a = encode_line(_a , _a , self.max_target_length , '''right''' )
__a = source_inputs['''input_ids'''].squeeze()
__a = target_inputs['''input_ids'''].squeeze()
__a = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __UpperCAmelCase ( _a ):
return [len(_a ) for x in Path(_a ).open().readlines()]
def __UpperCAmelCase ( self , _a ):
__a = torch.stack([x['''input_ids'''] for x in batch] )
__a = torch.stack([x['''attention_mask'''] for x in batch] )
__a = torch.stack([x['''decoder_input_ids'''] for x in batch] )
__a = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _a )
else self.tokenizer.pad_token_id
)
__a = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _a )
else self.tokenizer.pad_token_id
)
__a = trim_batch(_a , _a )
__a , __a = trim_batch(_a , _a , attention_mask=_a )
__a = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
lowercase_ = getLogger(__name__)
def lowercase ( lowerCAmelCase__ : List[List] ) -> int:
return list(itertools.chain.from_iterable(lowerCAmelCase__ ) )
def lowercase ( lowerCAmelCase__ : str ) -> None:
__a = get_git_info()
save_json(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , '''git_log.json''' ) )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str]=4 , **lowerCAmelCase__ : Any ) -> int:
with open(lowerCAmelCase__ , '''w''' ) as f:
json.dump(lowerCAmelCase__ , f , indent=lowerCAmelCase__ , **lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : List[str] ) -> Union[str, Any]:
with open(lowerCAmelCase__ ) as f:
return json.load(f )
def lowercase ( ) -> Optional[int]:
__a = git.Repo(search_parent_directories=lowerCAmelCase__ )
__a = {
'''repo_id''': str(lowerCAmelCase__ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def lowercase ( lowerCAmelCase__ : Callable , lowerCAmelCase__ : Iterable ) -> List:
return list(map(lowerCAmelCase__ , lowerCAmelCase__ ) )
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] ) -> Dict:
with open(lowerCAmelCase__ , '''wb''' ) as f:
return pickle.dump(lowerCAmelCase__ , f )
def lowercase ( lowerCAmelCase__ : Dict ) -> Union[str, Any]:
def remove_articles(lowerCAmelCase__ : Dict ):
return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , lowerCAmelCase__ )
def white_space_fix(lowerCAmelCase__ : Tuple ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase__ : Tuple ):
__a = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase__ : Optional[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase__ ) ) ) )
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple ) -> Optional[Any]:
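# Bag-of-words F1 between the normalized prediction and reference tokens.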
__a = normalize_answer(lowerCAmelCase__ ).split()
__a = normalize_answer(lowerCAmelCase__ ).split()
__a = Counter(lowerCAmelCase__ ) & Counter(lowerCAmelCase__ )
__a = sum(common.values() )
if num_same == 0:
return 0
__a = 1.0 * num_same / len(lowerCAmelCase__ )
__a = 1.0 * num_same / len(lowerCAmelCase__ )
__a = (2 * precision * recall) / (precision + recall)
return fa
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple ) -> int:
return normalize_answer(lowerCAmelCase__ ) == normalize_answer(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] ) -> Dict:
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ )
__a = 0
for hypo, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
em += exact_match_score(lowerCAmelCase__ , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
em /= len(lowerCAmelCase__ )
return {"em": em}
def lowercase ( lowerCAmelCase__ : int ) -> Tuple:
return model_prefix.startswith('''rag''' )
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] ) -> List[str]:
__a = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__a = '''dropout_rate'''
for p in extra_params:
if getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) and not hasattr(lowerCAmelCase__ , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(lowerCAmelCase__ ) )
delattr(lowerCAmelCase__ , lowerCAmelCase__ )
continue
__a = p if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) else equivalent_param[p]
setattr(lowerCAmelCase__ , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
delattr(lowerCAmelCase__ , lowerCAmelCase__ )
return hparams, config
| 65 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because it should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=_a , )
assert hasattr(self , '''env''' )
def __UpperCAmelCase ( self , _a ):
# configuration for running training on smdistributed Model Parallel
__a = {
'''enabled''': True,
'''processes_per_host''': 8,
}
__a = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
__a = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
__a = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=_a , instance_type=self.instance_type , debugger_hook_config=_a , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 500,
} , metric_definitions=self.env.metric_definitions , distribution=_a , py_version='''py36''' , )
def __UpperCAmelCase ( self , _a ):
TrainingJobAnalytics(_a ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def __UpperCAmelCase ( self , _a ):
# create estimator
__a = self.create_estimator(_a )
# run training
estimator.fit()
# result dataframe
__a = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__a = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , outfile )
| 65 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a=None , **_a ):
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , _a , )
super().__init__(args=_a , **_a )
| 65 | 1 |
"""simple docstring"""
# fmt: off
lowercase_ = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowercase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def lowercase ( lowerCAmelCase__ : str ) -> str:
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def lowercase ( lowerCAmelCase__ : str ) -> str:
return "".join(REVERSE_DICT[char] for char in message.split() )
def lowercase ( ) -> None:
__a = '''Morse code here!'''
print(lowerCAmelCase__ )
__a = encrypt(lowerCAmelCase__ )
print(lowerCAmelCase__ )
__a = decrypt(lowerCAmelCase__ )
print(lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 65 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase_ = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65 | 1 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a=None , _a=None , _a=None , **_a ):
if tokenize_kwargs is None:
__a = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
__a = truncation
__a = tokenize_kwargs
__a = {}
if return_tensors is not None:
__a = return_tensors
return preprocess_params, {}, postprocess_params
def __UpperCAmelCase ( self , _a , **_a ):
__a = self.framework
__a = self.tokenizer(_a , return_tensors=_a , **_a )
return model_inputs
def __UpperCAmelCase ( self , _a ):
__a = self.model(**_a )
return model_outputs
def __UpperCAmelCase ( self , _a , _a=False ):
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *_a , **_a ):
return super().__call__(*_a , **_a )
| 65 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=2 , _a=8 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=16 , _a=5 , _a=2 , _a=36 , _a="gelu" , _a=0.0 , _a=0.0 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self ):
__a = self.get_config()
__a = 300
return config
def __UpperCAmelCase ( self ):
(__a, __a, __a, __a, __a, __a, __a) = self.prepare_config_and_inputs()
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = MraModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a )
__a = model(_a , token_type_ids=_a )
__a = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
__a = True
__a = MraModel(_a )
model.to(_a )
model.eval()
__a = model(
_a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
__a = model(
_a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , )
__a = model(_a , attention_mask=_a , token_type_ids=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = MraForMaskedLM(config=_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = MraForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
__a = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = MraForSequenceClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = MraForTokenClassification(config=_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_choices
__a = MraForMultipleChoice(config=_a )
model.to(_a )
model.eval()
__a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
(__a, __a, __a, __a, __a, __a, __a) = config_and_inputs
__a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Dict = ()
def __UpperCAmelCase ( self ):
__a = MraModelTester(self )
__a = ConfigTester(self , config_class=_a , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a = type
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MraModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='''MRA does not output attentions''' )
def __UpperCAmelCase ( self ):
return
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
__a = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__a = model(_a )[0]
__a = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
__a = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__a = model(_a )[0]
__a = 50_265
__a = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
__a = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
__a = model(_a )[0]
__a = 50_265
__a = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
| 65 | 1 |
"""simple docstring"""
lowercase_ = [
(1_0_0_0, "M"),
(9_0_0, "CM"),
(5_0_0, "D"),
(4_0_0, "CD"),
(1_0_0, "C"),
(9_0, "XC"),
(5_0, "L"),
(4_0, "XL"),
(1_0, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def lowercase ( lowerCAmelCase__ : str ) -> int:
__a = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
__a = 0
__a = 0
while place < len(lowerCAmelCase__ ):
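# Subtractive notation: a smaller value before a larger one (e.g. "IV", "XC")
# is subtracted, hence the one-symbol look-ahead below.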
if (place + 1 < len(lowerCAmelCase__ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def lowercase ( lowerCAmelCase__ : int ) -> str:
__a = []
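# Greedy conversion: take as many copies of each Roman value as fit,
# scanning the values from largest to smallest.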
for arabic, roman in ROMAN:
__a , __a = divmod(lowerCAmelCase__ , lowerCAmelCase__ )
result.append(roman * factor )
if number == 0:
break
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> list[str]:
if partitions <= 0:
raise ValueError('''partitions must be a positive number!''' )
if partitions > number_of_bytes:
raise ValueError('''partitions can not be greater than number_of_bytes!''' )
__a = number_of_bytes // partitions
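# Integer division may leave a remainder; the final partition absorbs it below.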
__a = []
for i in range(lowerCAmelCase__ ):
__a = i * bytes_per_partition + 1
__a = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : int
__UpperCAmelCase : Node | None = None
__UpperCAmelCase : Node | None = None
def lowercase ( ) -> Node | None:
__a = Node(1 )
__a = Node(2 )
__a = Node(3 )
__a = Node(4 )
__a = Node(5 )
return tree
def lowercase ( lowerCAmelCase__ : Node | None ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def lowercase ( lowerCAmelCase__ : Node | None ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def lowercase ( lowerCAmelCase__ : Node | None ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def lowercase ( lowerCAmelCase__ : Node | None ) -> int:
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def lowercase ( lowerCAmelCase__ : Node | None ) -> Sequence[Node | None]:
__a = []
if root is None:
return output
__a = deque([root] )
while process_queue:
__a = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def lowercase ( lowerCAmelCase__ : Node | None , lowerCAmelCase__ : int ) -> Sequence[Node | None]:
__a = []
def populate_output(lowerCAmelCase__ : Node | None , lowerCAmelCase__ : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCAmelCase__ , lowerCAmelCase__ )
return output
def lowercase ( lowerCAmelCase__ : Node | None , lowerCAmelCase__ : int ) -> Sequence[Node | None]:
__a = []
def populate_output(lowerCAmelCase__ : Node | None , lowerCAmelCase__ : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCAmelCase__ , lowerCAmelCase__ )
return output
def lowercase ( lowerCAmelCase__ : Node | None ) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
__a = []
__a = 0
__a = height(lowerCAmelCase__ )
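# The flag alternates the traversal direction on each level (0 = left-to-right).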
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCAmelCase__ , lowerCAmelCase__ ) )
__a = 1
else:
output.append(get_nodes_from_right_to_left(lowerCAmelCase__ , lowerCAmelCase__ ) )
__a = 0
return output
def lowercase ( ) -> None: # Main function for testing.
__a = make_tree()
print(f'''In-order Traversal: {inorder(lowerCAmelCase__ )}''' )
print(f'''Pre-order Traversal: {preorder(lowerCAmelCase__ )}''' )
print(f'''Post-order Traversal: {postorder(lowerCAmelCase__ )}''' , '''\n''' )
print(f'''Height of Tree: {height(lowerCAmelCase__ )}''' , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCAmelCase__ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCAmelCase__ ) + 1 ):
print(f'''Level {level}:''' , get_nodes_from_left_to_right(lowerCAmelCase__ , level=lowerCAmelCase__ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 65 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : int ) -> list:
__a = len(lowerCAmelCase__ )
__a = [[0] * n for i in range(lowerCAmelCase__ )]
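# Neville's recurrence: q[j][i] evaluates, at xa, the interpolating polynomial
# through x_points[j - i + 1 .. j], built from the two order-(i - 1) entries
# q[j][i - 1] and q[j - 1][i - 1].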
for i in range(lowerCAmelCase__ ):
__a = y_points[i]
for i in range(2 , lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(_a , '''num_heads''' ) )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=64 , _a=3 , _a=[16, 48, 96] , _a=[1, 3, 6] , _a=[1, 2, 10] , _a=[7, 3, 3] , _a=[4, 2, 2] , _a=[2, 1, 1] , _a=[2, 2, 2] , _a=[False, False, True] , _a=[0.0, 0.0, 0.0] , _a=0.02 , _a=1E-12 , _a=True , _a=True , _a=2 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_sizes
__a = patch_stride
__a = patch_padding
__a = is_training
__a = use_labels
__a = num_labels
__a = num_channels
__a = embed_dim
__a = num_heads
__a = stride_kv
__a = depth
__a = cls_token
__a = attention_drop_rate
__a = initializer_range
__a = layer_norm_eps
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
# create a random int32 tensor of given shape
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = TFCvtModel(config=_a )
__a = model(_a , training=_a )
__a = (self.image_size, self.image_size)
__a , __a = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
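# Standard convolution output size: floor((in + 2 * pad - kernel) / stride) + 1.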
__a = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__a = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = self.num_labels
__a = TFCvtForImageClassification(_a )
__a = model(_a , labels=_a , training=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Tuple = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
__UpperCAmelCase : List[Any] = (
{'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
if is_tf_available()
else {}
)
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : int = False
__UpperCAmelCase : Tuple = False
def __UpperCAmelCase ( self ):
__a = TFCvtModelTester(self )
__a = TFCvtConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def __UpperCAmelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def __UpperCAmelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def __UpperCAmelCase ( self ):
__a = tf.keras.mixed_precision.Policy('''mixed_float16''' )
tf.keras.mixed_precision.set_global_policy(_a )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''' )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCAmelCase ( self ):
def check_hidden_states_output(_a , _a , _a ):
__a = model_class(_a )
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.hidden_states
__a = len(self.model_tester.depth )
self.assertEqual(len(_a ) , _a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(_a , _a , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = TFCvtModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowercase ( ) -> Dict:
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCAmelCase ( self ):
__a = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''tf''' )
# forward pass
__a = model(**_a )
# verify the logits
__a = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
__a = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _a , atol=1E-4 ) )
| 65 |
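A quick aside on the height/width arithmetic in the hidden-state check above: each convolutional patch embedding shrinks the spatial grid by the usual floor((dim + 2*padding - kernel) / stride) + 1 rule. A minimal sketch with assumed, illustrative stage parameters (not values from any checkpoint):
# Sketch: spatial size after each conv patch embedding; all numbers below are assumptions.
from math import floor

def conv_output_size(dim: int, kernel: int, stride: int, padding: int) -> int:
    # floor((dim + 2 * padding - kernel) / stride) + 1, as in the shape check above
    return floor((dim + 2 * padding - kernel) / stride) + 1

size = 224  # assumed input resolution
for kernel, stride, padding in [(7, 4, 2), (3, 2, 1), (3, 2, 1)]:  # assumed per-stage settings
    size = conv_output_size(size, kernel, stride, padding)
    print(size)  # 56, 28, 14 with these assumptions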
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
lowercase_ = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
lowercase_ = {
"allenai/longformer-base-4096": 4_0_9_6,
"allenai/longformer-large-4096": 4_0_9_6,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_0_9_6,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_0_9_6,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase ( ) -> Union[str, Any]:
__a = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
__a = bs[:]
__a = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCAmelCase__ )
cs.append(2**8 + n )
n += 1
__a = [chr(lowerCAmelCase__ ) for n in cs]
return dict(zip(lowerCAmelCase__ , lowerCAmelCase__ ) )
def lowercase ( lowerCAmelCase__ : Tuple ) -> Tuple:
__a = set()
__a = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__a = char
return pairs
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , _a , _a , _a="replace" , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=False , **_a , ):
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
errors=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , **_a , )
with open(_a , encoding='''utf-8''' ) as vocab_handle:
__a = json.load(_a )
__a = {v: k for k, v in self.encoder.items()}
__a = errors # how to handle errors in decoding
__a = bytes_to_unicode()
__a = {v: k for k, v in self.byte_encoder.items()}
with open(_a , encoding='''utf-8''' ) as merges_handle:
__a = merges_handle.read().split('''\n''' )[1:-1]
__a = [tuple(merge.split() ) for merge in bpe_merges]
__a = dict(zip(_a , range(len(_a ) ) ) )
__a = {}
__a = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__a = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def __UpperCAmelCase ( self ):
return len(self.encoder )
def __UpperCAmelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCAmelCase ( self , _a ):
if token in self.cache:
return self.cache[token]
__a = tuple(_a )
__a = get_pairs(_a )
if not pairs:
return token
while True:
__a = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__a , __a = bigram
__a = []
__a = 0
while i < len(_a ):
try:
__a = word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__a = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__a = tuple(_a )
__a = new_word
if len(_a ) == 1:
break
else:
__a = get_pairs(_a )
__a = ''' '''.join(_a )
__a = word
return word
def __UpperCAmelCase ( self , _a ):
__a = []
for token in re.findall(self.pat , _a ):
__a = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(''' ''' ) )
return bpe_tokens
def __UpperCAmelCase ( self , _a ):
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def __UpperCAmelCase ( self , _a ):
return self.decoder.get(_a )
def __UpperCAmelCase ( self , _a ):
__a = ''''''.join(_a )
__a = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __UpperCAmelCase ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_a , ensure_ascii=_a ) + '''\n''' )
__a = 0
with open(_a , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _a : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
__a = token_index
writer.write(''' '''.join(_a ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a=False , **_a ):
__a = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
__a = ''' ''' + text
return (text, kwargs)
| 65 | 1 |
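The `bpe` method above implements the classic greedy merge loop: repeatedly find the adjacent symbol pair with the lowest merge rank and fuse it. A self-contained sketch of the same idea (the merge table here is made up for illustration; real ranks come from merges.txt):
# Toy byte-pair encoding: fuse the best-ranked adjacent pair until no ranked pair remains.
def bpe_sketch(token: str, ranks: dict) -> str:
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        best = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if best not in ranks:
            break
        first, second = best
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)

print(bpe_sketch("lower", {("l", "o"): 0, ("lo", "w"): 1, ("e", "r"): 2}))  # 'low er'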
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowercase_ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=None , _a=True , _a=True , _a=None , ):
__a = size if size is not None else {'''height''': 20, '''width''': 20}
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = size
__a = do_normalize
__a = do_convert_rgb
__a = [512, 1_024, 2_048, 4_096]
__a = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def __UpperCAmelCase ( self ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __UpperCAmelCase ( self ):
__a = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
__a = Image.open(requests.get(_a , stream=_a ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = PixaStructImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self ):
__a = PixaStructImageProcessingTester(self )
@property
def __UpperCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCAmelCase ( self ):
__a = self.image_processor_tester.prepare_dummy_image()
__a = self.image_processing_class(**self.image_processor_dict )
__a = 2_048
__a = image_processor(_a , return_tensors='''pt''' , max_patches=_a )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def __UpperCAmelCase ( self ):
# Initialize image_processor
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
__a = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__a = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__a = image_processor(
_a , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCAmelCase ( self ):
# Initialize image_processor
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
__a = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
__a = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_a ):
__a = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
__a = '''Hello'''
__a = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_a , header_text=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__a = image_processor(
_a , return_tensors='''pt''' , max_patches=_a , header_text=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCAmelCase ( self ):
# Initialize image_processor
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
__a = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__a = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__a = image_processor(
_a , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCAmelCase ( self ):
# Initialize image_processor
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
__a = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__a = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__a = image_processor(
_a , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Tuple = PixaStructImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self ):
__a = PixaStructImageProcessingTester(self , num_channels=4 )
__a = 3
@property
def __UpperCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCAmelCase ( self ):
# Initialize image_processor
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
__a = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__a = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__a = image_processor(
_a , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 65 |
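One detail worth spelling out from the Pix2Struct tests above: `expected_hidden_dim` is `patch_height * patch_width * num_channels + 2` because each flattened patch carries its pixel values plus a row index and a column index. A tiny sanity check, using the tester's default sizes from above:
# Each flattened patch = row id + column id + patch_height * patch_width * channels pixel values.
patch_height, patch_width, num_channels = 16, 16, 3  # mirrors the tester defaults above
expected_hidden_dim = patch_height * patch_width * num_channels + 2
print(expected_hidden_dim)  # 770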
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = 'lxmert'
__UpperCAmelCase : str = {}
def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=9_500 , _a=1_600 , _a=400 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1E-12 , _a=9 , _a=5 , _a=5 , _a=2_048 , _a=4 , _a=6.67 , _a=True , _a=True , _a=True , _a=True , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = hidden_size
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = num_qa_labels
__a = num_object_labels
__a = num_attr_labels
__a = l_layers
__a = x_layers
__a = r_layers
__a = visual_feat_dim
__a = visual_pos_dim
__a = visual_loss_normalizer
__a = task_matched
__a = task_mask_lm
__a = task_obj_predict
__a = task_qa
__a = visual_obj_loss
__a = visual_attr_loss
__a = visual_feat_loss
__a = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**_a )
| 65 | 1 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowercase ( ) -> Tuple:
raise RuntimeError('''CUDA out of memory.''' )
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
__a = nn.Linear(3 , 4 )
__a = nn.BatchNormad(4 )
__a = nn.Linear(4 , 5 )
def __UpperCAmelCase ( self , _a ):
return self.lineara(self.batchnorm(self.lineara(_a ) ) )
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_a ):
nonlocal batch_sizes
batch_sizes.append(_a )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(_a , [128, 64, 32, 16, 8] )
def __UpperCAmelCase ( self ):
__a = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_a , _a ):
nonlocal batch_sizes
batch_sizes.append(_a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__a , __a = mock_training_loop_function('''hello''' )
self.assertListEqual(_a , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def __UpperCAmelCase ( self ):
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(_a ):
pass
with self.assertRaises(_a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __UpperCAmelCase ( self ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(_a ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(_a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __UpperCAmelCase ( self ):
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_a , _a , _a ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(_a ) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def __UpperCAmelCase ( self ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(_a ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(_a ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def __UpperCAmelCase ( self ):
__a = torch.cuda.memory_allocated()
__a = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , _a )
__a = release_memory(_a )
self.assertEqual(torch.cuda.memory_allocated() , _a )
| 65 |
"""simple docstring"""
import itertools
import math
def lowercase ( lowerCAmelCase__ : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase ( ) -> int:
__a = 2
while True:
if is_prime(lowerCAmelCase__ ):
yield num
num += 1
def lowercase ( lowerCAmelCase__ : int = 10001 ) -> int:
return next(itertools.islice(prime_generator() , nth - 1 , lowerCAmelCase__ ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 65 | 1 |
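The 6k +/- 1 shortcut above rests on a small fact: every integer is congruent to 0..5 mod 6, and residues 0, 2, 3, 4 are divisible by 2 or 3, so any prime above 3 must be congruent to 1 or 5. A quick empirical check (a sketch, independent of the solution code):
# Verify that every prime above 3 is congruent to 1 or 5 modulo 6.
def is_prime_naive(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

assert all(p % 6 in (1, 5) for p in range(5, 1_000) if is_prime_naive(p))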
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
lowercase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
lowercase_ = {
"google/electra-small-generator": 5_1_2,
"google/electra-base-generator": 5_1_2,
"google/electra-large-generator": 5_1_2,
"google/electra-small-discriminator": 5_1_2,
"google/electra-base-discriminator": 5_1_2,
"google/electra-large-discriminator": 5_1_2,
}
lowercase_ = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Union[str, Any] = ElectraTokenizer
def __init__( self , _a=None , _a=None , _a=True , _a="[UNK]" , _a="[SEP]" , _a="[PAD]" , _a="[CLS]" , _a="[MASK]" , _a=True , _a=None , **_a , ):
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , tokenize_chinese_chars=_a , strip_accents=_a , **_a , )
__a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _a ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _a ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _a ) != tokenize_chinese_chars
):
__a = getattr(_a , normalizer_state.pop('''type''' ) )
__a = do_lower_case
__a = strip_accents
__a = tokenize_chinese_chars
__a = normalizer_class(**_a )
__a = do_lower_case
def __UpperCAmelCase ( self , _a , _a=None ):
__a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _a , _a = None ):
__a = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
| 65 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 65 | 1 |
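The `_import_structure` / `_LazyModule` scaffolding above defers heavy imports (torch, vision) until an exported name is actually accessed. A stripped-down sketch of the pattern; this is not transformers' actual `_LazyModule` (whose real implementation lives in its utils and handles more cases), just the core idea:
# Minimal lazy module: resolve attributes to submodule imports on first access.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._name_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)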
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase_ = 1_6
lowercase_ = 3_2
def lowercase ( lowerCAmelCase__ : Accelerator , lowerCAmelCase__ : int = 16 ) -> Dict:
__a = AutoTokenizer.from_pretrained('''bert-base-cased''' )
__a = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(lowerCAmelCase__ : Dict ):
# max_length=None => use the model max length (it's actually the default)
__a = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__a = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__a = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCAmelCase__ : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__a = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__a = 16
elif accelerator.mixed_precision != "no":
__a = 8
else:
__a = None
return tokenizer.pad(
lowerCAmelCase__ , padding='''longest''' , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors='''pt''' , )
# Instantiate dataloaders.
__a = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
__a = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase_ = mocked_dataloaders # noqa: F811
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] ) -> Optional[int]:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , lowerCAmelCase__ ) == "1":
__a = 2
# Initialize accelerator
__a = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__a = config['''lr''']
__a = int(config['''num_epochs'''] )
__a = int(config['''seed'''] )
__a = int(config['''batch_size'''] )
__a = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowerCAmelCase__ )
def inner_training_loop(lowerCAmelCase__ : List[Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowerCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__a = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__a = model.to(accelerator.device )
# Instantiate optimizer
__a = AdamW(params=model.parameters() , lr=lowerCAmelCase__ )
__a , __a = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ )
# Instantiate scheduler
__a = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase__ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__a , __a , __a , __a , __a = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Now we train the model
for epoch in range(lowerCAmelCase__ ):
model.train()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__a = model(**lowerCAmelCase__ )
__a = outputs.loss
accelerator.backward(lowerCAmelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__a = model(**lowerCAmelCase__ )
__a = outputs.logits.argmax(dim=-1 )
__a , __a = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )
__a = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , lowerCAmelCase__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def lowercase ( ) -> str:
__a = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
__a = parser.parse_args()
__a = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 65 |
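The `@find_executable_batch_size` decorator that drives the script above retries the wrapped training function with ever-smaller batch sizes whenever it hits an out-of-memory error. A self-contained sketch of that retry loop (simplified: accelerate's real implementation also frees CUDA caches and recognizes several OOM message variants):
# Sketch of an OOM-retry decorator: halve the batch size until the wrapped function succeeds.
import functools

def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while True:
                if batch_size == 0:
                    raise RuntimeError("No executable batch size found, reached zero.")
                try:
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as error:
                    if "out of memory" in str(error).lower():
                        batch_size //= 2  # retry with half the batch
                    else:
                        raise
        return wrapper
    return decorator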
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowercase_ = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowercase_ = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowercase_ = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __UpperCAmelCase ( self , _a , _a , _a=None , _a=1 , _a="binary" , _a=None ):
__a = fa_score(
_a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a )
return {"f1": float(_a ) if score.size == 1 else score}
| 65 | 1 |
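To make the harmonic-mean definition concrete: in Example 1 of the docstring above, precision and recall are both 0.5, so F1 = 2 * 0.5 * 0.5 / (0.5 + 0.5) = 0.5. A minimal from-scratch sketch that reproduces it without sklearn:
# Binary F1 from counts; reproduces Example 1 from the docstring above.
def f1_binary(references, predictions, pos_label=1):
    tp = sum(r == p == pos_label for r, p in zip(references, predictions))
    fp = sum(p == pos_label and r != pos_label for r, p in zip(references, predictions))
    fn = sum(r == pos_label and p != pos_label for r, p in zip(references, predictions))
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    return 2 * precision * recall / (precision + recall) if precision + recall else 0.0

print(f1_binary([0, 1, 0, 1, 0], [0, 0, 1, 1, 0]))  # 0.5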
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
lowercase_ = random.Random()
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : int=1.0 , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Optional[int]=None ) -> Union[str, Any]:
if rng is None:
__a = global_rng
__a = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _a , _a=7 , _a=400 , _a=2_000 , _a=2_048 , _a=128 , _a=1 , _a=512 , _a=30 , _a=44_100 , ):
__a = parent
__a = batch_size
__a = min_seq_length
__a = max_seq_length
__a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a = spectrogram_length
__a = feature_size
__a = num_audio_channels
__a = hop_length
__a = chunk_length
__a = sampling_rate
def __UpperCAmelCase ( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __UpperCAmelCase ( self , _a=False , _a=False ):
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
__a = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = TvltFeatureExtractor
def __UpperCAmelCase ( self ):
__a = TvltFeatureExtractionTester(self )
def __UpperCAmelCase ( self ):
__a = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_a , '''feature_size''' ) )
self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_a , '''hop_length''' ) )
self.assertTrue(hasattr(_a , '''chunk_length''' ) )
self.assertTrue(hasattr(_a , '''sampling_rate''' ) )
def __UpperCAmelCase ( self ):
__a = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
__a = self.feature_extraction_class.from_pretrained(_a )
__a = feat_extract_first.to_dict()
__a = feat_extract_second.to_dict()
__a = dict_first.pop('''mel_filters''' )
__a = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __UpperCAmelCase ( self ):
__a = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a = os.path.join(_a , '''feat_extract.json''' )
feat_extract_first.to_json_file(_a )
__a = self.feature_extraction_class.from_json_file(_a )
__a = feat_extract_first.to_dict()
__a = feat_extract_second.to_dict()
__a = dict_first.pop('''mel_filters''' )
__a = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __UpperCAmelCase ( self ):
# Initialize feature_extractor
__a = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__a = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__a = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
__a = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__a = feature_extractor(_a , return_tensors='''np''' , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__a = feature_extractor(
_a , return_tensors='''np''' , sampling_rate=44_100 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__a = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a = np.asarray(_a )
__a = feature_extractor(_a , return_tensors='''np''' , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __UpperCAmelCase ( self , _a ):
__a = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__a = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __UpperCAmelCase ( self ):
__a = self._load_datasamples(1 )
__a = TvltFeatureExtractor()
__a = feature_extractor(_a , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__a = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1E-4 ) )
| 65 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = ['onnx']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls , *_a , **_a ):
requires_backends(cls , ['''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls , *_a , **_a ):
requires_backends(cls , ['''onnx'''] )
| 65 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def lowercase ( lowerCAmelCase__ : Callable[[int | float], int | float] , lowerCAmelCase__ : int | float , lowerCAmelCase__ : int | float , lowerCAmelCase__ : int = 100 , ) -> float:
__a = x_start
__a = fnc(lowerCAmelCase__ )
__a = 0.0
for _ in range(lowerCAmelCase__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
__a = (x_end - x_start) / steps + xa
__a = fnc(lowerCAmelCase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__a = xa
__a = fxa
return area
if __name__ == "__main__":
def lowercase ( lowerCAmelCase__ : int ) -> Optional[int]:
return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
lowercase_ = 1_0
while i <= 1_0_0_0_0_0:
print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 1_0
| 65 |
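The loop above is the composite trapezoidal rule: each step adds (f(x_i) + f(x_{i+1})) * (x_{i+1} - x_i) / 2. For f(x) = x**3 + x**2 on [-5, 5] the odd x**3 term integrates to zero, so the exact value is 2 * 5**3 / 3 = 250/3 ≈ 83.33, which the printed approximations converge toward. A quick cross-check sketch:
# Cross-check the trapezoidal loop against the closed-form integral of x**3 + x**2 on [-5, 5].
def trapezoid_sketch(fnc, x_start, x_end, steps=10_000):
    h = (x_end - x_start) / steps
    xs = [x_start + i * h for i in range(steps + 1)]
    return sum((fnc(a) + fnc(b)) * h / 2 for a, b in zip(xs, xs[1:]))

approx = trapezoid_sketch(lambda x: x**3 + x**2, -5, 5)
exact = 250 / 3  # only the even x**2 term contributes over the symmetric interval
assert abs(approx - exact) < 1e-2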
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowercase_ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=16 , _a=13 , _a=7 , _a=14 , _a=10 , _a=19 , _a=5 , _a=4 , _a=True , _a=16 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=[1, 2, 3, 4, 5] , _a=25 , _a=5 , ):
__a = d_model
__a = parent
__a = batch_size
__a = prediction_length
__a = context_length
__a = cardinality
__a = num_time_features
__a = lags_sequence
__a = embedding_dimension
__a = is_training
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = context_length
__a = prediction_length + label_length
__a = label_length
__a = moving_average
__a = autocorrelation_factor
def __UpperCAmelCase ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __UpperCAmelCase ( self , _a ):
__a = config.context_length + max(config.lags_sequence )
__a = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__a = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__a = floats_tensor([self.batch_size, _past_length] )
__a = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__a = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__a = floats_tensor([self.batch_size, config.prediction_length] )
__a = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def __UpperCAmelCase ( self ):
__a = self.get_config()
__a = self.prepare_autoformer_inputs_dict(_a )
return config, inputs_dict
def __UpperCAmelCase ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self , _a , _a ):
__a = AutoformerModel(config=_a ).to(_a ).eval()
__a = model(**_a )
__a = outputs.encoder_last_hidden_state
__a = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__a = model.get_encoder()
encoder.save_pretrained(_a )
__a = AutoformerEncoder.from_pretrained(_a ).to(_a )
__a , __a , __a , __a , __a = model.create_network_inputs(**_a )
__a , __a = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__a = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__a = encoder(inputs_embeds=_a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__a = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__a = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__a = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__a = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__a = model.get_decoder()
decoder.save_pretrained(_a )
__a = AutoformerDecoder.from_pretrained(_a ).to(_a )
__a = decoder(
trend=_a , inputs_embeds=_a , encoder_hidden_states=_a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__UpperCAmelCase : int = (AutoformerForPrediction,) if is_torch_available() else ()
__UpperCAmelCase : Any = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
__UpperCAmelCase : int = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : int = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : int = False
def __UpperCAmelCase ( self ):
__a = AutoformerModelTester(self )
__a = ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__a = model_class(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a )
__a , __a = model_class.from_pretrained(_a , output_loading_info=_a )
self.assertEqual(info['''missing_keys'''] , [] )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_a )
@unittest.skip(reason='''Model has no token embeddings''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a = inspect.signature(getattr(_a , '''forward''' ) )
# The main input is the name of the argument after `self`
__a = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , _a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(_a )] , _a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
__a = getattr(self.model_tester , '''seq_length''' , _a )
__a = getattr(self.model_tester , '''decoder_seq_length''' , _a )
__a = getattr(self.model_tester , '''encoder_seq_length''' , _a )
__a = getattr(self.model_tester , '''d_model''' , _a )
__a = getattr(self.model_tester , '''num_attention_heads''' , _a )
__a = d_model // num_attention_heads
for model_class in self.all_model_classes:
__a = True
__a = False
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.encoder_attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__a = len(_a )
__a = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(_a , _a )
# decoder attentions
__a = outputs.decoder_attentions
self.assertIsInstance(_a , (list, tuple) )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__a = outputs.cross_attentions
self.assertIsInstance(_a , (list, tuple) )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__a = True
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + 2 , len(_a ) )
__a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __UpperCAmelCase ( self ):
super().test_retain_grad_hidden_states_attentions()
def lowercase ( lowerCAmelCase__ : Optional[int]="train-batch.pt" ) -> List[str]:
__a = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=lowerCAmelCase__ , repo_type='''dataset''' )
__a = torch.load(lowerCAmelCase__ , map_location=lowerCAmelCase__ )
return batch
@require_torch
@slow
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_a )
__a = prepare_batch()
with torch.no_grad():
__a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
__a = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=_a )
self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) )
def __UpperCAmelCase ( self ):
__a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_a )
__a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
__a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
__a = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=_a )
self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) )
def __UpperCAmelCase ( self ):
__a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_a )
__a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
__a = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
__a = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , _a )
__a = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=_a )
__a = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _a , rtol=1E-1 ) )
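# --- Added illustration (not part of the integration tests above) ---
# A minimal sketch of the moving-average series decomposition that Autoformer's
# `decomposition_layer` performs before the decoder inputs are assembled: the
# trend is a moving average of the series and the seasonal part is the
# residual. The kernel size and edge padding here are illustrative assumptions,
# not the model's exact configuration.
import torch
def decompose_series(x: torch.Tensor, kernel_size: int = 25):
    """Split a (batch, time, features) series into (seasonal, trend)."""
    # replicate the edges so the moving average keeps the sequence length
    front = x[:, :1, :].repeat(1, (kernel_size - 1) // 2, 1)
    back = x[:, -1:, :].repeat(1, kernel_size // 2, 1)
    padded = torch.cat([front, x, back], dim=1)
    # a moving average over time is average pooling with stride 1
    trend = torch.nn.functional.avg_pool1d(
        padded.permute(0, 2, 1), kernel_size=kernel_size, stride=1
    ).permute(0, 2, 1)
    return x - trend, trend
# quick check: the two components sum back to the input
_x = torch.randn(2, 48, 3)
_seasonal, _trend = decompose_series(_x)
assert torch.allclose(_seasonal + _trend, _x, atol=1e-6)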
| 65 | 1 |
"""simple docstring"""
from typing import Any
def lowercase ( lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : dict , lowerCAmelCase__ : dict , lowerCAmelCase__ : dict , ) -> list:
_validation(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
# Creates data structures and fill initial step
__a = {}
__a = {}
for state in states_space:
__a = observations_space[0]
__a = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__a = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(lowerCAmelCase__ ) ):
__a = observations_space[o]
__a = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__a = ''''''
__a = -1
for k_state in states_space:
__a = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__a = probability
__a = k_state
# Update probabilities and pointers dicts
__a = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__a = arg_max
# The final observation
__a = observations_space[len(lowerCAmelCase__ ) - 1]
# argmax for given final observation
__a = ''''''
__a = -1
for k_state in states_space:
__a = probabilities[(k_state, final_observation)]
if probability > max_probability:
__a = probability
__a = k_state
__a = arg_max
# Process pointers backwards
__a = last_state
__a = []
for o in range(len(lowerCAmelCase__ ) - 1 , -1 , -1 ):
result.append(lowerCAmelCase__ )
__a = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , ) -> None:
_validate_not_empty(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
_validate_lists(lowerCAmelCase__ , lowerCAmelCase__ )
_validate_dicts(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , ) -> None:
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any ) -> None:
_validate_list(lowerCAmelCase__ , '''observations_space''' )
_validate_list(lowerCAmelCase__ , '''states_space''' )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str ) -> None:
if not isinstance(_object , lowerCAmelCase__ ):
__a = f'''{var_name} must be a list'''
raise ValueError(lowerCAmelCase__ )
else:
for x in _object:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = f'''{var_name} must be a list of strings'''
raise ValueError(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , ) -> None:
_validate_dict(lowerCAmelCase__ , '''initial_probabilities''' , lowerCAmelCase__ )
_validate_nested_dict(lowerCAmelCase__ , '''transition_probabilities''' )
_validate_nested_dict(lowerCAmelCase__ , '''emission_probabilities''' )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str ) -> None:
_validate_dict(_object , lowerCAmelCase__ , lowerCAmelCase__ )
for x in _object.values():
_validate_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : type , lowerCAmelCase__ : bool = False ) -> None:
if not isinstance(_object , lowerCAmelCase__ ):
__a = f'''{var_name} must be a dict'''
raise ValueError(lowerCAmelCase__ )
if not all(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for x in _object ):
__a = f'''{var_name} all keys must be strings'''
raise ValueError(lowerCAmelCase__ )
if not all(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for x in _object.values() ):
__a = '''nested dictionary ''' if nested else ''''''
__a = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(lowerCAmelCase__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
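# --- Added illustration (self-contained, since the helpers above were renamed) ---
# A toy run of the same Viterbi dynamic program on the classic healthy/fever
# HMM, kept compact so it can be read end to end:
def tiny_viterbi(observations, states, start_p, trans_p, emit_p):
    # prob[(s, t)] = best probability of any path that ends in state s at step t
    prob = {(s, 0): start_p[s] * emit_p[s][observations[0]] for s in states}
    back = {}
    for t in range(1, len(observations)):
        for s in states:
            best_prev = max(states, key=lambda k: prob[(k, t - 1)] * trans_p[k][s])
            prob[(s, t)] = prob[(best_prev, t - 1)] * trans_p[best_prev][s] * emit_p[s][observations[t]]
            back[(s, t)] = best_prev
    # argmax over the final column, then follow the back-pointers
    last = max(states, key=lambda s: prob[(s, len(observations) - 1)])
    path = [last]
    for t in range(len(observations) - 1, 0, -1):
        path.append(back[(path[-1], t)])
    return list(reversed(path))
print(
    tiny_viterbi(
        observations=["normal", "cold", "dizzy"],
        states=["Healthy", "Fever"],
        start_p={"Healthy": 0.6, "Fever": 0.4},
        trans_p={"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}},
        emit_p={
            "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
            "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
        },
    )
)  # -> ['Healthy', 'Healthy', 'Fever']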
| 65 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] ) -> Any:
if isinstance(lowerCAmelCase__ , torch.Tensor ):
return image
elif isinstance(lowerCAmelCase__ , PIL.Image.Image ):
__a = [image]
if isinstance(image[0] , PIL.Image.Image ):
__a = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__a = np.concatenate(lowerCAmelCase__ , axis=0 )
__a = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_55.0
__a = image.transpose(0 , 3 , 1 , 2 )
__a = 2.0 * image - 1.0
__a = torch.from_numpy(lowerCAmelCase__ )
elif isinstance(image[0] , torch.Tensor ):
__a = torch.cat(lowerCAmelCase__ , dim=0 )
return image
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]=0.99_95 ) -> int:
if not isinstance(lowerCAmelCase__ , np.ndarray ):
__a = True
__a = va.device
__a = va.cpu().numpy()
__a = va.cpu().numpy()
__a = np.sum(va * va / (np.linalg.norm(lowerCAmelCase__ ) * np.linalg.norm(lowerCAmelCase__ )) )
if np.abs(lowerCAmelCase__ ) > DOT_THRESHOLD:
__a = (1 - t) * va + t * va
else:
__a = np.arccos(lowerCAmelCase__ )
__a = np.sin(lowerCAmelCase__ )
__a = theta_a * t
__a = np.sin(lowerCAmelCase__ )
__a = np.sin(theta_a - theta_t ) / sin_theta_a
__a = sin_theta_t / sin_theta_a
__a = sa * va + sa * va
if inputs_are_torch:
__a = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
return va
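# --- Added illustration (not part of the original pipeline module) ---
# The renamed variables above collapse the two input vectors, so here is a
# compact self-contained restatement of the spherical interpolation: nearly
# parallel vectors (dot product above the threshold) fall back to plain linear
# interpolation, otherwise the angle between them is interpolated.
def _slerp_demo(t, v0, v1, dot_threshold=0.9995):
    dot = np.sum(v0 * v1) / (np.linalg.norm(v0) * np.linalg.norm(v1))
    if np.abs(dot) > dot_threshold:  # almost parallel: lerp is numerically safer
        return (1 - t) * v0 + t * v1
    theta = np.arccos(dot)  # angle between the two vectors
    return (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)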
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] ) -> int:
__a = F.normalize(lowerCAmelCase__ , dim=-1 )
__a = F.normalize(lowerCAmelCase__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ) -> List[str]:
for param in model.parameters():
__a = value
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a=None , _a=None , _a=None , ):
super().__init__()
self.register_modules(
vae=_a , text_encoder=_a , clip_model=_a , tokenizer=_a , unet=_a , scheduler=_a , feature_extractor=_a , coca_model=_a , coca_tokenizer=_a , coca_transform=_a , )
__a = (
feature_extractor.size
if isinstance(feature_extractor.size , _a )
else feature_extractor.size['''shortest_edge''']
)
__a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _a )
set_requires_grad(self.clip_model , _a )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.vae , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.vae , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.unet , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.unet , _a )
def __UpperCAmelCase ( self , _a , _a , _a ):
# get the original timestep using init_timestep
__a = min(int(num_inference_steps * strength ) , _a )
__a = max(num_inference_steps - init_timestep , 0 )
__a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a=None ):
if not isinstance(_a , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(_a )}''' )
__a = image.to(device=_a , dtype=_a )
if isinstance(_a , _a ):
__a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_a )
]
__a = torch.cat(_a , dim=0 )
else:
__a = self.vae.encode(_a ).latent_dist.sample(_a )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 0.1_8215 * init_latents
__a = init_latents.repeat_interleave(_a , dim=0 )
__a = randn_tensor(init_latents.shape , generator=_a , device=_a , dtype=_a )
# get latents
__a = self.scheduler.add_noise(_a , _a , _a )
__a = init_latents
return latents
def __UpperCAmelCase ( self , _a ):
__a = self.coca_transform(_a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def __UpperCAmelCase ( self , _a , _a ):
__a = self.feature_extractor.preprocess(_a )
__a = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
__a = self.clip_model.get_image_features(_a )
__a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
__a = image_embeddings_clip.repeat_interleave(_a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , ):
__a = latents.detach().requires_grad_()
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a , encoder_hidden_states=_a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__a = self.scheduler.alphas_cumprod[timestep]
__a = 1 - alpha_prod_t
# compute the predicted original sample ("predicted x_0") from the predicted
# noise, per formula (12) of https://arxiv.org/pdf/2010.02502.pdf
__a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__a = torch.sqrt(_a )
__a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _a ):
__a = self.scheduler.sigmas[index]
__a = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 1 / 0.1_8215 * sample
__a = self.vae.decode(_a ).sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = transforms.Resize(self.feature_extractor_size )(_a )
__a = self.normalize(_a ).to(latents.dtype )
__a = self.clip_model.get_image_features(_a )
__a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
__a = spherical_dist_loss(_a , _a ).mean() * clip_guidance_scale
__a = -torch.autograd.grad(_a , _a )[0]
if isinstance(self.scheduler , _a ):
__a = latents.detach() + grads * (sigma**2)
__a = noise_pred_original
else:
__a = noise_pred_original - torch.sqrt(_a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , _a , _a , _a = None , _a = None , _a = 512 , _a = 512 , _a = 0.6 , _a = 50 , _a = 7.5 , _a = 1 , _a = 0.0 , _a = 100 , _a = None , _a = "pil" , _a = True , _a = 0.8 , _a = 0.1 , _a = 0.1 , ):
if isinstance(_a , _a ) and len(_a ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(_a )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(_a , torch.Generator ) and batch_size > 1:
__a = [generator] + [None] * (batch_size - 1)
__a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
__a = [x[0] for x in coca_is_none if x[1]]
__a = ''', '''.join(_a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_a ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__a = self.get_image_description(_a )
if style_prompt is None:
if len(_a ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__a = self.get_image_description(_a )
# get prompt text embeddings for content and style
__a = self.tokenizer(
_a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
__a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__a = self.tokenizer(
_a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
__a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__a = slerp(_a , _a , _a )
# duplicate text embeddings for each generation per prompt
__a = text_embeddings.repeat_interleave(_a , dim=0 )
# set timesteps
__a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__a = {}
if accepts_offset:
__a = 1
self.scheduler.set_timesteps(_a , **_a )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
__a , __a = self.get_timesteps(_a , _a , self.device )
__a = timesteps[:1].repeat(_a )
# Preprocess image
__a = preprocess(_a , _a , _a )
__a = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
__a = preprocess(_a , _a , _a )
__a = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
__a = slerp(_a , _a , _a )
if clip_guidance_scale > 0:
__a = self.get_clip_image_embeddings(_a , _a )
__a = self.get_clip_image_embeddings(_a , _a )
__a = slerp(
_a , _a , _a )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a = content_text_input.input_ids.shape[-1]
__a = self.tokenizer([''''''] , padding='''max_length''' , max_length=_a , return_tensors='''pt''' )
__a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__a = uncond_embeddings.repeat_interleave(_a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__a = torch.randn(_a , generator=_a , device='''cpu''' , dtype=_a ).to(
self.device )
else:
__a = torch.randn(_a , generator=_a , device=self.device , dtype=_a )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
# check if the scheduler accepts generator
__a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__a = generator
with self.progress_bar(total=_a ):
for i, t in enumerate(_a ):
# expand the latents if we are doing classifier free guidance
__a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a , encoder_hidden_states=_a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__a , __a = noise_pred.chunk(2 )
__a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__a , __a = self.cond_fn(
_a , _a , _a , _a , _a , _a , _a , )
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 1 / 0.1_8215 * latents
__a = self.vae.decode(_a ).sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a )
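# --- Added illustration (not part of the pipeline class above) ---
# The guidance step inside the denoising loop combines the unconditional and
# text-conditioned noise predictions, analogous to the guidance weight `w` in
# eq. (2) of the Imagen paper. A tiny standalone sketch with made-up tensors:
import torch
noise_pred_uncond = torch.randn(1, 4, 64, 64)
noise_pred_text = torch.randn(1, 4, 64, 64)
guidance_scale = 7.5
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# guidance_scale == 1.0 recovers the purely text-conditioned prediction
assert torch.allclose(noise_pred_uncond + 1.0 * (noise_pred_text - noise_pred_uncond), noise_pred_text)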
| 65 | 1 |
"""simple docstring"""
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a ):
# the input arrives as a comma-separated string, so split it into a list of number strings
__a = arr.split(''',''' )
def __UpperCAmelCase ( self ):
__a = [int(self.array[0] )] * len(self.array )
__a = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
__a = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
__a = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
lowercase_ = input("please input some numbers:")
lowercase_ = SubArray(whole_array)
lowercase_ = array.solve_sub_array()
print(("the results is:", re))
| 65 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowercase_ = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowercase ( lowerCAmelCase__ : List[Any] ) -> str:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : int ) -> Union[str, Any]:
from transformers.testing_utils import pytest_terminal_summary_main
__a = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(lowerCAmelCase__ , id=lowerCAmelCase__ )
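# --- Added illustration ---
# Both functions above delegate to helpers in transformers.testing_utils; a
# conftest that defined the option and summary hook directly would look roughly
# like this (the hook names and signatures are standard pytest; the option name
# mirrors the one read above):
#
#     def pytest_addoption(parser):
#         parser.addoption("--make-reports", action="store", default=None)
#
#     def pytest_terminal_summary(terminalreporter):
#         if terminalreporter.config.getoption("--make-reports"):
#             ...  # write the report files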
| 65 | 1 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> bool:
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
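# --- Added illustration ---
# With the two parameters given distinct names (the renaming above collapses
# them), the function is the classic sign-bit trick: XOR-ing two integers is
# negative exactly when their sign bits differ.
def different_signs(a: int, b: int) -> bool:
    return (a ^ b) < 0
assert different_signs(3, -7) is True
assert different_signs(-3, -7) is False
assert different_signs(3, 7) is False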
| 65 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase_ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__UpperCAmelCase : Any = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__UpperCAmelCase : Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__UpperCAmelCase : Dict = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = ZeroShotClassificationPipeline(
model=_a , tokenizer=_a , candidate_labels=['''politics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __UpperCAmelCase ( self , _a , _a ):
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
# No kwarg
__a = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__a = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
# https://github.com/huggingface/transformers/issues/13846
__a = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_a , [
{'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]}
for i in range(1 )
] , )
__a = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_a , [
{'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]}
for i in range(2 )
] , )
with self.assertRaises(_a ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(_a ):
classifier(_a , candidate_labels='''politics''' )
with self.assertRaises(_a ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(_a ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=_a )
with self.assertRaises(_a ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(_a ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=_a , )
self.run_entailment_id(_a )
def __UpperCAmelCase ( self , _a ):
__a = zero_shot_classifier.model.config
__a = config.labelaid
__a = zero_shot_classifier.entailment_id
__a = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
__a = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__a = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__a = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
__a = original_labelaid
self.assertEqual(_a , zero_shot_classifier.entailment_id )
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__a = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_a , )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def __UpperCAmelCase ( self ):
__a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__a = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_a , )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
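# --- Added illustration (not part of the test class above) ---
# Zero-shot classification is implemented as natural language inference: each
# candidate label is rendered into the hypothesis template (the pipeline's
# default is "This example is {}.") and paired with the input sequence, and the
# NLI model's entailment probability becomes that label's score. A minimal
# sketch of the pair construction:
def build_nli_pairs(sequence, candidate_labels, hypothesis_template="This example is {}."):
    return [(sequence, hypothesis_template.format(label)) for label in candidate_labels]
pairs = build_nli_pairs("Who are you voting for in 2020?", ["politics", "public health", "science"])
print(pairs[0])  # ('Who are you voting for in 2020?', 'This example is politics.')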
| 65 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=_a ).to(_a )
__a = AutoTokenizer.from_pretrained('''google/mt5-small''' )
__a = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
__a = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
__a = model(input_ids.to(_a ) , labels=labels.to(_a ) ).loss
__a = -(labels.shape[-1] * loss.item())
__a = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
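# --- Added illustration ---
# The score above rescales the mean per-token cross-entropy returned by the
# model back into a total (negative) log-likelihood: multiply by the number of
# label tokens and negate. Sketch with made-up numbers:
mean_loss_per_token = 2.5  # what `model(...).loss.item()` returns
num_label_tokens = 4       # labels.shape[-1]
sequence_log_likelihood = -(num_label_tokens * mean_loss_per_token)
assert sequence_log_likelihood == -10.0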
| 65 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase ( lowerCAmelCase__ : Optional[int] ) -> int:
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def lowercase ( lowerCAmelCase__ : Any ) -> Any:
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a ):
__a = metric_id
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : Any = [MetricMock(__SCREAMING_SNAKE_CASE ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def __UpperCAmelCase ( self ):
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
'''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple ) -> Optional[int]:
if "tmp_path" in args:
__a = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(lowerCAmelCase__ , match='''https://huggingface.co/docs/evaluate''' ):
func(*lowerCAmelCase__ )
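# --- Added illustration ---
# The deprecation these fixtures exercise points users to the `evaluate`
# library; a minimal sketch of the modern equivalent (assumes `evaluate` is
# installed and the metric script can be fetched):
import evaluate
metric = evaluate.load("mse")
print(metric.compute(predictions=[2.5, 0.0], references=[3.0, -0.5]))  # {'mse': 0.25}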
| 65 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def lowercase ( lowerCAmelCase__ : Sequence[float] , lowerCAmelCase__ : float ) -> float:
return sum(c * (x**i) for i, c in enumerate(lowerCAmelCase__ ) )
def lowercase ( lowerCAmelCase__ : Sequence[float] , lowerCAmelCase__ : float ) -> float:
__a = 0.0
for coeff in reversed(lowerCAmelCase__ ):
__a = result * x + coeff
return result
if __name__ == "__main__":
lowercase_ = (0.0, 0.0, 5.0, 9.3, 7.0)
lowercase_ = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
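# --- Added illustration ---
# Horner's rule rewrites c0 + c1*x + c2*x**2 + ... as c0 + x*(c1 + x*(c2 + ...)),
# trading the explicit powers for one multiply-add per coefficient. A quick
# equivalence check against the direct evaluation:
import math
coeffs = (0.0, 0.0, 5.0, 9.3, 7.0)
point = 10.0
direct = sum(c * point**i for i, c in enumerate(coeffs))
nested = 0.0
for c in reversed(coeffs):
    nested = nested * point + c
assert math.isclose(direct, nested)  # both give 79800.0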
| 65 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : int ) -> int:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or number < 0:
raise ValueError('''Input must be a non-negative integer''' )
__a = 0
while number:
# `number &= number - 1` jumps straight to the next set bit instead of
# checking every bit position, so the loop runs once per set bit
# rather than 32 times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
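# --- Added illustration ---
# `number & (number - 1)` clears the lowest set bit, which is why the loop
# above runs once per set bit (Brian Kernighan's trick):
n = 0b101100
assert n & (n - 1) == 0b101000          # lowest 1 removed
assert 0b101000 & (0b101000 - 1) == 0b100000
assert bin(37).count("1") == 3          # cross-check against the builtin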
| 65 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = tempfile.mkdtemp()
# fmt: off
__a = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__a = dict(zip(_a , range(len(_a ) ) ) )
__a = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__a = {'''unk_token''': '''<unk>'''}
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
__a = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
__a = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCAmelCase ( self , **_a ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCAmelCase ( self , **_a ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __UpperCAmelCase ( self , **_a ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
__a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__a = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self ):
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = self.get_image_processor()
__a = CLIPSegProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
__a = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
__a = CLIPSegProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
__a = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __UpperCAmelCase ( self ):
__a = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__a = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
__a = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCAmelCase ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPSegProcessor(tokenizer=_a , image_processor=_a )
__a = self.prepare_image_inputs()
__a = image_processor(_a , return_tensors='''np''' )
__a = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCAmelCase ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPSegProcessor(tokenizer=_a , image_processor=_a )
__a = '''lower newer'''
__a = processor(text=_a )
__a = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPSegProcessor(tokenizer=_a , image_processor=_a )
__a = '''lower newer'''
__a = self.prepare_image_inputs()
__a = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCAmelCase ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPSegProcessor(tokenizer=_a , image_processor=_a )
__a = self.prepare_image_inputs()
__a = self.prepare_image_inputs()
__a = processor(images=_a , visual_prompt=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCAmelCase ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPSegProcessor(tokenizer=_a , image_processor=_a )
__a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a = processor.batch_decode(_a )
__a = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
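# --- Added illustration (not part of the test class above) ---
# The pattern the tests exercise: a processor wraps a tokenizer and an image
# processor and routes `text=` / `images=` to the right component. A hedged
# sketch with a public checkpoint (assumes network access; the checkpoint name
# is an assumption, any CLIPSeg checkpoint with a saved processor works):
#
#     from transformers import CLIPSegProcessor
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=image, return_tensors="pt")
#     sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']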
| 65 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ) -> str:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Tuple:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict ) -> Optional[Any]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , split=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = text_path
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = [text_path]
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]=("train",) ) -> Optional[Any]:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
for split in splits:
__a = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ) -> Union[str, Any]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader({'''train''': text_path} , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> str:
__a = tmp_path / '''cache'''
# the "text" column is read as "string" by default; the `features` argument can cast it to other dtypes
__a = {'''text''': '''string'''}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader({'''train''': text_path} , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ) -> Dict:
if split:
__a = {split: text_path}
else:
__a = '''train'''
__a = {'''train''': text_path, '''test''': text_path}
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
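# --- Added illustration ---
# Outside the test harness the same reader is reached via `load_dataset("text", ...)`;
# the `features` argument casts the single "text" column, exactly what the
# parametrized cases above check. Sketch (the file path is hypothetical):
from datasets import Features, Value, load_dataset
ds = load_dataset(
    "text",
    data_files={"train": "sample.txt"},  # hypothetical local file
    features=Features({"text": Value("string")}),
)
print(ds["train"].column_names)  # ['text']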
| 65 | 1 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
lowercase_ = logging.get_logger(__name__)
lowercase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Model type selected in the list: ' + ', '.join(__SCREAMING_SNAKE_CASE )} )
__UpperCAmelCase : str = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
__UpperCAmelCase : int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCAmelCase : int = field(
default=1_2_8 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
__UpperCAmelCase : int = field(
default=6_4 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
__UpperCAmelCase : int = field(
default=3_0 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
__UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
__UpperCAmelCase : float = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
__UpperCAmelCase : int = field(
default=2_0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
__UpperCAmelCase : int = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
__UpperCAmelCase : int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 'train'
__UpperCAmelCase : List[Any] = 'dev'
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : SquadDataTrainingArguments
__UpperCAmelCase : List[SquadFeatures]
__UpperCAmelCase : Split
__UpperCAmelCase : bool
def __init__( self , _a , _a , _a = None , _a = Split.train , _a = False , _a = None , _a = "pt" , ):
__a = args
__a = is_language_sensitive
        __a = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(_a , _a ):
try:
__a = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
__a = mode
# Load data features from cache or dataset file
__a = '''v2''' if args.version_2_with_negative else '''v1'''
__a = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__a = cached_features_file + '''.lock'''
with FileLock(_a ):
if os.path.exists(_a ) and not args.overwrite_cache:
__a = time.time()
__a = torch.load(_a )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__a = self.old_features['''features''']
__a = self.old_features.get('''dataset''' , _a )
__a = self.old_features.get('''examples''' , _a )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
''' future run''' )
else:
if mode == Split.dev:
__a = self.processor.get_dev_examples(args.data_dir )
else:
__a = self.processor.get_train_examples(args.data_dir )
__a , __a = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_a , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_a , )
__a = time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , _a , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
return len(self.features )
def __getitem__( self , _a ):
# Convert to Tensors and build dataset
__a = self.features[i]
__a = torch.tensor(feature.input_ids , dtype=torch.long )
__a = torch.tensor(feature.attention_mask , dtype=torch.long )
__a = torch.tensor(feature.token_type_ids , dtype=torch.long )
__a = torch.tensor(feature.cls_index , dtype=torch.long )
__a = torch.tensor(feature.p_mask , dtype=torch.float )
__a = torch.tensor(feature.is_impossible , dtype=torch.float )
__a = {
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
            inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
if self.mode == Split.train:
__a = torch.tensor(feature.start_position , dtype=torch.long )
__a = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
| 65 |
"""simple docstring"""
def lowercase ( num_a : int , num_b : int ) -> bool:
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 1 |
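# Hedged demo (added for illustration): XOR of two ints is negative exactly when
# their sign bits differ, which is why `num_a ^ num_b < 0` above detects opposite
# signs without any branching.
assert (3 ^ -7) < 0         # opposite signs -> True
assert not (3 ^ 7) < 0      # both positive -> False
assert not (-3 ^ -7) < 0    # both negative -> False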
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 1_0
    forward_days = 5
    periods = 2_0
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(6_4))  # input_shape on a non-first layer is ignored by Keras, so it is dropped here
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 65 |
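# Hedged toy check (added for illustration) of the sliding-window split above:
# each sample pairs `look_back` inputs with the next `forward_days` targets, and
# with a length-8 series, 8 - 2 - 3 + 1 = 4 windows fit.
import numpy as np

series = np.arange(8)
look_back, forward_days = 3, 2
windows = [
    (series[i : i + look_back], series[i + look_back : i + look_back + forward_days])
    for i in range(0, len(series) - forward_days - look_back + 1)
]
assert len(windows) == 4
assert windows[0][0].tolist() == [0, 1, 2] and windows[0][1].tolist() == [3, 4]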
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SpeechTaTokenizer
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Dict = True
def __UpperCAmelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__a = SpeechTaTokenizer(_a )
__a = AddedToken('''<mask>''' , lstrip=_a , rstrip=_a )
__a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , _a ):
__a = '''this is a test'''
__a = '''this is a test'''
return input_text, output_text
def __UpperCAmelCase ( self , _a , _a=False , _a=20 , _a=5 ):
__a , __a = self.get_input_output_texts(_a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = tokenizer.decode(_a , clean_up_tokenization_spaces=_a )
return text, ids
def __UpperCAmelCase ( self ):
__a = '''<pad>'''
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __UpperCAmelCase ( self ):
__a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(_a ) , 81 )
def __UpperCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __UpperCAmelCase ( self ):
__a = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__a = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
__a = tokenizer.add_tokens(_a )
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size + len(_a ) )
__a = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__a = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
__a = tokenizer.add_special_tokens(_a )
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size_a + len(_a ) )
__a = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a = self.get_tokenizer()
__a = tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(_a , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
__a = tokenizer.convert_tokens_to_ids(_a )
# fmt: off
self.assertListEqual(_a , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
__a = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def __UpperCAmelCase ( self ):
# Use custom sequence because this tokenizer does not handle numbers.
__a = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
__a = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=_a , )
| 65 | 1 |
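# Hedged sketch (added for illustration): a pure-Python stand-in for the
# character-level scheme the tokenizer test above asserts. Each word is prefixed
# with the SentencePiece underline marker ("\u2581") and split into single
# characters; no SentencePiece model is required for this illustration.
def char_tokenize(text, marker="\u2581"):
    tokens = []
    for word in text.split(" "):
        tokens.append(marker)
        tokens.extend(word)
    return tokens

assert char_tokenize("This is a test") == [
    "\u2581", "T", "h", "i", "s",
    "\u2581", "i", "s",
    "\u2581", "a",
    "\u2581", "t", "e", "s", "t",
]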
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=32 , _a=3 , _a=4 , _a=[10, 20, 30, 40] , _a=[2, 2, 3, 2] , _a=True , _a=True , _a=37 , _a="gelu" , _a=10 , _a=0.02 , _a=["stage2", "stage3", "stage4"] , _a=[2, 3, 4] , _a=None , ):
__a = parent
__a = batch_size
__a = image_size
__a = num_channels
__a = num_stages
__a = hidden_sizes
__a = depths
__a = is_training
__a = use_labels
__a = intermediate_size
__a = hidden_act
__a = num_labels
__a = initializer_range
__a = out_features
__a = out_indices
__a = scope
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = ConvNextVaModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = ConvNextVaForImageClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = ConvNextVaBackbone(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__a = None
__a = ConvNextVaBackbone(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : int = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Any = False
def __UpperCAmelCase ( self ):
__a = ConvNextVaModelTester(self )
__a = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self ):
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__a , __a = self.model_tester.prepare_config_and_inputs_with_labels()
__a = True
if model_class.__name__ in [
*get_values(_a ),
*get_values(_a ),
]:
continue
__a = model_class(_a )
model.to(_a )
model.train()
__a = self._prepare_for_class(_a , _a , return_labels=_a )
__a = model(**_a ).loss
loss.backward()
def __UpperCAmelCase ( self ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__a , __a = self.model_tester.prepare_config_and_inputs_with_labels()
__a = False
__a = True
if (
model_class.__name__
in [*get_values(_a ), *get_values(_a )]
or not model_class.supports_gradient_checkpointing
):
continue
__a = model_class(_a )
model.to(_a )
model.gradient_checkpointing_enable()
model.train()
__a = self._prepare_for_class(_a , _a , return_labels=_a )
__a = model(**_a ).loss
loss.backward()
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
def check_hidden_states_output(_a , _a , _a ):
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(_a , _a , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ConvNextVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowercase ( ) -> Any:
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ):
__a = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(_a )
__a = self.default_image_processor
__a = prepare_img()
__a = preprocessor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
# verify the logits
__a = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
__a = torch.tensor([0.9996, 0.1966, -0.4386] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
| 65 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = ['input_features', 'attention_mask']
def __init__( self , _a=80 , _a=16_000 , _a=0.0 , _a=10 , _a=25 , _a="hamming_window" , _a=3_2768.0 , _a=0.97 , _a=1.0 , _a=True , _a=True , _a=False , **_a , ):
super().__init__(feature_size=_a , sampling_rate=_a , padding_value=_a , **_a )
__a = feature_size
__a = sampling_rate
__a = padding_value
__a = hop_length
__a = win_length
__a = frame_signal_scale
__a = preemphasis_coeff
__a = mel_floor
__a = normalize_means
__a = normalize_vars
__a = win_function
__a = return_attention_mask
__a = win_length * sampling_rate // 1_000
__a = hop_length * sampling_rate // 1_000
__a = optimal_fft_length(self.sample_size )
__a = (self.n_fft // 2) + 1
def __UpperCAmelCase ( self , _a ):
if self.win_function == "hamming_window":
__a = window_function(window_length=self.sample_size , name=self.win_function , periodic=_a )
else:
__a = window_function(window_length=self.sample_size , name=self.win_function )
__a = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__a = spectrogram(
one_waveform * self.frame_signal_scale , window=_a , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=_a , preemphasis=self.preemphasis_coeff , mel_filters=_a , mel_floor=self.mel_floor , log_mel='''log''' , )
return msfc_features.T
def __UpperCAmelCase ( self , _a , _a , _a ):
# make sure we normalize float32 arrays
if self.normalize_means:
__a = x[:input_length].mean(axis=0 )
__a = np.subtract(_a , _a )
if self.normalize_vars:
__a = x[:input_length].std(axis=0 )
__a = np.divide(_a , _a )
if input_length < x.shape[0]:
__a = padding_value
# make sure array is in float32
        __a = x.astype(np.float32 )
return x
def __UpperCAmelCase ( self , _a , _a = None ):
__a = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(_a , _a , self.padding_value ) for x, n in zip(_a , _a )]
def __call__( self , _a , _a = False , _a = None , _a = False , _a = None , _a = None , _a = None , _a = None , **_a , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__a = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__a = is_batched_numpy or (
isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
            __a = [np.asarray(_a , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(_a , np.ndarray ):
            __a = np.asarray(_a , dtype=np.float32 )
        elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            __a = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
__a = [raw_speech]
# extract fbank features
__a = [self._extract_mfsc_features(_a ) for one_waveform in raw_speech]
# convert into correct format for padding
__a = BatchFeature({'''input_features''': features} )
__a = self.pad(
_a , padding=_a , max_length=_a , truncation=_a , pad_to_multiple_of=_a , return_attention_mask=_a , **_a , )
# make sure list is in array format
__a = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , _a ):
            __a = [np.asarray(_a , dtype=np.float32 ) for feature in input_features]
__a = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
            __a = [np.asarray(_a , dtype=np.int32 ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__a = (
                np.array(_a , dtype=np.int32 )
if self._get_padding_strategies(_a , max_length=_a ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__a = self.normalize(
padded_inputs['''input_features'''] , attention_mask=_a )
if return_tensors is not None:
__a = padded_inputs.convert_to_tensors(_a )
return padded_inputs
| 65 | 1 |
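# Hedged numeric check (added for illustration) of the framing arithmetic used by
# the feature extractor above: a 25 ms window and a 10 ms hop at 16 kHz give 400-
# and 160-sample frames, and the next power-of-two FFT length (512) yields
# 512 // 2 + 1 = 257 one-sided frequency bins.
sampling_rate = 16_000
win_length_ms, hop_length_ms = 25, 10
sample_size = win_length_ms * sampling_rate // 1_000    # 400 samples per window
sample_stride = hop_length_ms * sampling_rate // 1_000  # 160 samples per hop
n_fft = 1 << (sample_size - 1).bit_length()             # 512, smallest power of two >= 400
n_freqs = n_fft // 2 + 1                                # 257
assert (sample_size, sample_stride, n_fft, n_freqs) == (400, 160, 512, 257)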
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 5_5): c for c in ascii_uppercase}
def decimal_to_any ( num : int , base : int ) -> str:
    if isinstance(num , float ):
        raise TypeError('''int() can\'t convert non-string with explicit base''' )
    if isinstance(num , str ):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
    if isinstance(base , float ):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if num < 0:
        raise ValueError('''parameter must be positive int''' )
    if base in (0, 1):
        raise ValueError('''base must be >= 2''' )
    if base > 36:
        raise ValueError('''base must be <= 36''' )
    new_value = ''''''
    mod = 0
    div = 0
    while div != 1:
        div , mod = divmod(num , base )
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod )]
        else:
            actual_value = str(mod )
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(div )
            return str(new_value[::-1] )
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 3_7):
for num in range(1_0_0_0):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 65 |
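# Hedged round-trip demo (added for illustration): `decimal_to_any` above
# repeatedly takes divmod(num, base) and maps remainders >= 10 to letters;
# Python's int(s, base) inverts it, which is what the self-test relies on.
assert decimal_to_any(255, 16) == "FF"
assert decimal_to_any(7, 2) == "111"
assert int(decimal_to_any(12_345, 36), 36) == 12_345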
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=_a , )
assert hasattr(self , '''env''' )
def __UpperCAmelCase ( self , _a ):
# configuration for running training on smdistributed Model Parallel
__a = {
'''enabled''': True,
'''processes_per_host''': 8,
}
__a = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
__a = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
__a = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=_a , instance_type=self.instance_type , debugger_hook_config=_a , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 500,
} , metric_definitions=self.env.metric_definitions , distribution=_a , py_version='''py36''' , )
def __UpperCAmelCase ( self , _a ):
TrainingJobAnalytics(_a ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def __UpperCAmelCase ( self , _a ):
# create estimator
__a = self.create_estimator(_a )
# run training
estimator.fit()
# result dataframe
__a = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__a = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _a )
| 65 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _a , _a=7 , _a=3 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , _a=True , _a=1 / 255 , _a=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def __UpperCAmelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self , _a , _a=False ):
if not batched:
__a = image_inputs[0]
if isinstance(_a , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size['''shortest_edge'''] * h / w )
__a = self.size['''shortest_edge''']
elif w > h:
__a = self.size['''shortest_edge''']
__a = int(self.size['''shortest_edge'''] * w / h )
else:
__a = self.size['''shortest_edge''']
__a = self.size['''shortest_edge''']
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a = max(_a , key=lambda _a : item[0] )[0]
__a = max(_a , key=lambda _a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = DeformableDetrImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self ):
__a = DeformableDetrImageProcessingTester(self )
@property
def __UpperCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''do_rescale''' ) )
self.assertTrue(hasattr(_a , '''do_pad''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
def __UpperCAmelCase ( self ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad , _a )
__a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_a )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , _a )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(_a , batched=_a )
__a = image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(_a , return_tensors='''pt''' ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(_a , return_tensors='''pt''' ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self ):
# prepare image and target
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
__a = json.loads(f.read() )
__a = {'''image_id''': 39_769, '''annotations''': target}
# encode them
__a = DeformableDetrImageProcessor()
__a = image_processing(images=_a , annotations=_a , return_tensors='''pt''' )
# verify pixel values
__a = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _a )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _a , atol=1E-4 ) )
# verify area
__a = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _a ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _a )
__a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _a , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _a ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _a ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _a ) )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _a ) )
# verify size
__a = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _a ) )
@slow
def __UpperCAmelCase ( self ):
# prepare image, target and masks_path
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
__a = json.loads(f.read() )
__a = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
__a = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
__a = DeformableDetrImageProcessor(format='''coco_panoptic''' )
__a = image_processing(images=_a , annotations=_a , masks_path=_a , return_tensors='''pt''' )
# verify pixel values
__a = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _a )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _a , atol=1E-4 ) )
# verify area
__a = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _a ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _a )
__a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _a , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _a ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _a ) )
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _a ) )
# verify masks
__a = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _a )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _a ) )
# verify size
__a = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _a ) )
| 65 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    def __init__( self , args=None , **kwargs ):
        warnings.warn(
            '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
            '''instead.''' , FutureWarning , )
        super().__init__(args=args , **kwargs )
| 65 | 1 |
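# Hedged pattern sketch (added for illustration): the wrapper above is a
# deprecation shim - subclass the replacement, warn once, then delegate.
# `NewTrainer` below is a stand-in name, not a real transformers class.
import warnings

class NewTrainer:
    def __init__(self, args=None, **kwargs):
        self.args = args
        self.kwargs = kwargs

class DeprecatedTrainer(NewTrainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn("`DeprecatedTrainer` is deprecated; use `NewTrainer` instead.", FutureWarning)
        super().__init__(args=args, **kwargs)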
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase_ = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase_ = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65 | 1 |
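# Hedged mechanism sketch (added for illustration): the two __init__ modules above
# defer imports via `_LazyModule` - each exported name maps to a submodule, and the
# import runs only on first attribute access. This standalone stand-in shows the
# core idea; the real class also handles __dir__, pickling, and backend errors.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._name_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs only once per name
        return value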
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
__a = AutoTokenizer.from_pretrained('''google/mt5-small''' )
__a = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
__a = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
__a = shift_tokens_right(_a , model.config.pad_token_id , model.config.decoder_start_token_id )
__a = model(_a , decoder_input_ids=_a ).logits
__a = optax.softmax_cross_entropy(_a , onehot(_a , logits.shape[-1] ) ).mean()
__a = -(labels.shape[-1] * loss.item())
__a = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 65 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=2 , _a=8 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=16 , _a=5 , _a=2 , _a=36 , _a="gelu" , _a=0.0 , _a=0.0 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self ):
__a = self.get_config()
__a = 300
return config
def __UpperCAmelCase ( self ):
        __a , __a , __a , __a , __a , __a , __a = self.prepare_config_and_inputs()
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = MraModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a )
__a = model(_a , token_type_ids=_a )
__a = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
__a = True
__a = MraModel(_a )
model.to(_a )
model.eval()
__a = model(
_a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
__a = model(
_a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , )
__a = model(_a , attention_mask=_a , token_type_ids=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = MraForMaskedLM(config=_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = MraForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
__a = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = MraForSequenceClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = MraForTokenClassification(config=_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_choices
__a = MraForMultipleChoice(config=_a )
model.to(_a )
model.eval()
__a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
        __a , __a , __a , __a , __a , __a , __a = config_and_inputs
__a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Dict = ()
def __UpperCAmelCase ( self ):
__a = MraModelTester(self )
__a = ConfigTester(self , config_class=_a , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a = type
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MraModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='''MRA does not output attentions''' )
def __UpperCAmelCase ( self ):
return
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
__a = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__a = model(_a )[0]
__a = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
__a = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__a = model(_a )[0]
__a = 50_265
__a = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
__a = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
__a = model(_a )[0]
__a = 50_265
__a = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
| 65 | 1 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Union[str, Any]="pt" ) -> int:
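    # Tokenize a single line to fixed-length tensors; byte-level tokenizers get add_prefix_space=True unless the line already starts with a space.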
__a = {'''add_prefix_space''': True} if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and not line.startswith(''' ''' ) else {}
__a = padding_side
return tokenizer(
[line] , max_length=lowerCAmelCase__ , padding='''max_length''' if pad_to_max_length else None , truncation=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=None , ) -> int:
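    # Drop padding columns that are pad-only across the whole batch; slice the attention mask to match when one is provided.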
__a = input_ids.ne(lowerCAmelCase__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a="train" , _a=None , _a=None , _a=None , _a="" , ):
super().__init__()
__a = Path(_a ).joinpath(type_path + '''.source''' )
__a = Path(_a ).joinpath(type_path + '''.target''' )
__a = self.get_char_lens(self.src_file )
__a = max_source_length
__a = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
__a = tokenizer
__a = prefix
if n_obs is not None:
__a = self.src_lens[:n_obs]
__a = src_lang
__a = tgt_lang
def __len__( self ):
return len(self.src_lens )
def __getitem__( self , _a ):
__a = index + 1 # linecache starts at 1
__a = self.prefix + linecache.getline(str(self.src_file ) , _a ).rstrip('''\n''' )
__a = linecache.getline(str(self.tgt_file ) , _a ).rstrip('''\n''' )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__a = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _a ) else self.tokenizer
)
__a = self.tokenizer.generator if isinstance(self.tokenizer , _a ) else self.tokenizer
__a = encode_line(_a , _a , self.max_source_length , '''right''' )
__a = encode_line(_a , _a , self.max_target_length , '''right''' )
__a = source_inputs['''input_ids'''].squeeze()
__a = target_inputs['''input_ids'''].squeeze()
__a = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __UpperCAmelCase ( _a ):
return [len(_a ) for x in Path(_a ).open().readlines()]
def __UpperCAmelCase ( self , _a ):
__a = torch.stack([x['''input_ids'''] for x in batch] )
__a = torch.stack([x['''attention_mask'''] for x in batch] )
__a = torch.stack([x['''decoder_input_ids'''] for x in batch] )
__a = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _a )
else self.tokenizer.pad_token_id
)
__a = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _a )
else self.tokenizer.pad_token_id
)
__a = trim_batch(_a , _a )
__a , __a = trim_batch(_a , _a , attention_mask=_a )
__a = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
lowercase_ = getLogger(__name__)
def lowercase ( lowerCAmelCase__ : List[List] ) -> int:
return list(itertools.chain.from_iterable(lowerCAmelCase__ ) )
def lowercase ( lowerCAmelCase__ : str ) -> None:
__a = get_git_info()
save_json(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , '''git_log.json''' ) )
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int=4 , **lowerCAmelCase__ : Optional[int] ) -> str:
with open(lowerCAmelCase__ , '''w''' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ , indent=lowerCAmelCase__ , **lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Optional[Any] ) -> Dict:
with open(lowerCAmelCase__ ) as f:
return json.load(lowerCAmelCase__ )
def lowercase ( ) -> Optional[Any]:
__a = git.Repo(search_parent_directories=lowerCAmelCase__ )
__a = {
'''repo_id''': str(lowerCAmelCase__ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def lowercase ( lowerCAmelCase__ : Callable , lowerCAmelCase__ : Iterable ) -> List:
return list(map(lowerCAmelCase__ , lowerCAmelCase__ ) )
def lowercase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> Optional[int]:
with open(lowerCAmelCase__ , '''wb''' ) as f:
return pickle.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : List[str] ) -> str:
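    # SQuAD-style answer normalization: lowercase, strip punctuation, drop the articles a/an/the, and collapse whitespace.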
def remove_articles(lowerCAmelCase__ : Any ):
return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , lowerCAmelCase__ )
def white_space_fix(lowerCAmelCase__ : List[Any] ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase__ : int ):
__a = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase__ : int ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase__ ) ) ) )
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : str ) -> Union[str, Any]:
__a = normalize_answer(lowerCAmelCase__ ).split()
__a = normalize_answer(lowerCAmelCase__ ).split()
__a = Counter(lowerCAmelCase__ ) & Counter(lowerCAmelCase__ )
__a = sum(common.values() )
if num_same == 0:
return 0
__a = 1.0 * num_same / len(lowerCAmelCase__ )
__a = 1.0 * num_same / len(lowerCAmelCase__ )
__a = (2 * precision * recall) / (precision + recall)
return fa
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : int ) -> int:
return normalize_answer(lowerCAmelCase__ ) == normalize_answer(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] ) -> Dict:
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ )
__a = 0
for hypo, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
em += exact_match_score(lowerCAmelCase__ , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
em /= len(lowerCAmelCase__ )
return {"em": em}
def lowercase ( lowerCAmelCase__ : Optional[int] ) -> Tuple:
return model_prefix.startswith('''rag''' )
def lowercase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ) -> Optional[Any]:
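    # Copy each hyperparameter named in extra_params from the hparams object onto the model config, translating names that differ by architecture (e.g. dropout -> dropout_rate for T5).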
__a = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__a = '''dropout_rate'''
for p in extra_params:
if getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) and not hasattr(lowerCAmelCase__ , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(lowerCAmelCase__ ) )
delattr(lowerCAmelCase__ , lowerCAmelCase__ )
continue
__a = p if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) else equivalent_param[p]
setattr(lowerCAmelCase__ , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
delattr(lowerCAmelCase__ , lowerCAmelCase__ )
return hparams, config
| 65 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> list[str]:
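    # Split number_of_bytes into contiguous 1-indexed byte ranges formatted as "start-end"; the last partition absorbs any remainder.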
if partitions <= 0:
raise ValueError('''partitions must be a positive number!''' )
if partitions > number_of_bytes:
        raise ValueError('''partitions can not be greater than number_of_bytes!''' )
__a = number_of_bytes // partitions
__a = []
for i in range(lowerCAmelCase__ ):
__a = i * bytes_per_partition + 1
__a = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> list[str]:
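    # Split number_of_bytes into contiguous 1-indexed byte ranges formatted as "start-end"; the last partition absorbs any remainder.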
if partitions <= 0:
raise ValueError('''partitions must be a positive number!''' )
if partitions > number_of_bytes:
        raise ValueError('''partitions can not be greater than number_of_bytes!''' )
__a = number_of_bytes // partitions
__a = []
for i in range(lowerCAmelCase__ ):
__a = i * bytes_per_partition + 1
__a = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : int ) -> list:
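    # Neville's iterated interpolation: fill the triangular table q so that q[n - 1][n - 1] is the polynomial value at the query point; returns [value, table].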
__a = len(lowerCAmelCase__ )
__a = [[0] * n for i in range(lowerCAmelCase__ )]
for i in range(lowerCAmelCase__ ):
__a = y_points[i]
for i in range(2 , lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowercase_ = "\\n\n"
lowercase_ = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
lowercase_ = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def __UpperCAmelCase ( self , _a , _a , _a = 16 , _a = True , _a=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__a = '''cuda'''
else:
__a = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__a = AutoModelForCausalLM.from_pretrained(_a )
__a = model.to(_a )
__a = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__a = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__a = model.config.max_length - 1
else:
__a = model.config.max_length
__a = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
__a = encodings['''input_ids''']
__a = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__a = []
__a = CrossEntropyLoss(reduction='''none''' )
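        # For each batch: shift logits and labels by one position, mask out padding, average the per-token cross-entropy over each sequence, and exponentiate to get its perplexity.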
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
__a = min(start_index + batch_size , len(_a ) )
__a = encoded_texts[start_index:end_index]
__a = attn_masks[start_index:end_index]
if add_start_token:
__a = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
__a = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__a = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 )
__a = encoded_batch
with torch.no_grad():
__a = model(_a , attention_mask=_a ).logits
__a = out_logits[..., :-1, :].contiguous()
__a = labels[..., 1:].contiguous()
__a = attn_mask[..., 1:].contiguous()
__a = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 65 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
lowercase_ = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
lowercase_ = {
"allenai/longformer-base-4096": 4_0_9_6,
"allenai/longformer-large-4096": 4_0_9_6,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_0_9_6,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_0_9_6,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase ( ) -> Union[str, Any]:
__a = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
__a = bs[:]
__a = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCAmelCase__ )
cs.append(2**8 + n )
n += 1
__a = [chr(lowerCAmelCase__ ) for n in cs]
return dict(zip(lowerCAmelCase__ , lowerCAmelCase__ ) )
def lowercase ( lowerCAmelCase__ : Tuple ) -> Tuple:
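    # Return the set of adjacent symbol pairs in a word represented as a tuple of symbols.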
__a = set()
__a = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__a = char
return pairs
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , _a , _a , _a="replace" , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=False , **_a , ):
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
errors=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , **_a , )
with open(_a , encoding='''utf-8''' ) as vocab_handle:
__a = json.load(_a )
__a = {v: k for k, v in self.encoder.items()}
__a = errors # how to handle errors in decoding
__a = bytes_to_unicode()
__a = {v: k for k, v in self.byte_encoder.items()}
with open(_a , encoding='''utf-8''' ) as merges_handle:
__a = merges_handle.read().split('''\n''' )[1:-1]
__a = [tuple(merge.split() ) for merge in bpe_merges]
__a = dict(zip(_a , range(len(_a ) ) ) )
__a = {}
__a = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__a = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def __UpperCAmelCase ( self ):
return len(self.encoder )
def __UpperCAmelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCAmelCase ( self , _a ):
if token in self.cache:
return self.cache[token]
__a = tuple(_a )
__a = get_pairs(_a )
if not pairs:
return token
while True:
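            # Greedily merge the lowest-ranked (highest-priority) bigram from the BPE merge table until no known pair remains.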
__a = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__a , __a = bigram
__a = []
__a = 0
while i < len(_a ):
try:
__a = word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__a = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__a = tuple(_a )
__a = new_word
if len(_a ) == 1:
break
else:
__a = get_pairs(_a )
__a = ''' '''.join(_a )
__a = word
return word
def __UpperCAmelCase ( self , _a ):
__a = []
for token in re.findall(self.pat , _a ):
__a = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(''' ''' ) )
return bpe_tokens
def __UpperCAmelCase ( self , _a ):
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def __UpperCAmelCase ( self , _a ):
return self.decoder.get(_a )
def __UpperCAmelCase ( self , _a ):
__a = ''''''.join(_a )
__a = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __UpperCAmelCase ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_a , ensure_ascii=_a ) + '''\n''' )
__a = 0
with open(_a , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _a : _a[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
__a = token_index
writer.write(''' '''.join(_a ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a=False , **_a ):
__a = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
__a = ''' ''' + text
return (text, kwargs)
| 65 | 1 |
"""simple docstring"""
import socket
def lowercase ( ) -> Dict:
__a = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
__a = socket.gethostname()
__a = 12312
sock.connect((host, port) )
sock.send(b'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
__a = sock.recv(1024 )
if not data:
break
out_file.write(lowerCAmelCase__ )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
| 65 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = 'lxmert'
__UpperCAmelCase : str = {}
def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=9_500 , _a=1_600 , _a=400 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1E-12 , _a=9 , _a=5 , _a=5 , _a=2_048 , _a=4 , _a=6.67 , _a=True , _a=True , _a=True , _a=True , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = hidden_size
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = num_qa_labels
__a = num_object_labels
__a = num_attr_labels
__a = l_layers
__a = x_layers
__a = r_layers
__a = visual_feat_dim
__a = visual_pos_dim
__a = visual_loss_normalizer
__a = task_matched
__a = task_mask_lm
__a = task_obj_predict
__a = task_qa
__a = visual_obj_loss
__a = visual_attr_loss
__a = visual_feat_loss
__a = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**_a )
| 65 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 65 |
"""simple docstring"""
import itertools
import math
def lowercase ( lowerCAmelCase__ : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase ( ) -> int:
__a = 2
while True:
if is_prime(lowerCAmelCase__ ):
yield num
num += 1
def lowercase ( lowerCAmelCase__ : int = 10001 ) -> int:
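    # Project Euler #7: take the nth element of the infinite prime generator (the 10001st prime by default).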
return next(itertools.islice(prime_generator() , nth - 1 , lowerCAmelCase__ ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 65 | 1 |
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'pixel_values'
__UpperCAmelCase : Any = False
__UpperCAmelCase : str = TimmBackboneConfig
def __init__( self , _a , **_a ):
requires_backends(self , '''timm''' )
super().__init__(_a )
__a = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' )
if hasattr(_a , '''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
__a = getattr(_a , '''use_pretrained_backbone''' , _a )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
__a = config.out_indices if getattr(_a , '''out_indices''' , _a ) is not None else (-1,)
__a = timm.create_model(
config.backbone , pretrained=_a , features_only=config.features_only , in_chans=config.num_channels , out_indices=_a , **_a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__a = self._backbone.return_layers
__a = {layer['''module''']: str(_a ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(_a )
@classmethod
def __UpperCAmelCase ( cls , _a , *_a , **_a ):
requires_backends(cls , ['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
__a = kwargs.pop('''config''' , TimmBackboneConfig() )
__a = kwargs.pop('''use_timm_backbone''' , _a )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
__a = kwargs.pop('''num_channels''' , config.num_channels )
__a = kwargs.pop('''features_only''' , config.features_only )
__a = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone )
__a = kwargs.pop('''out_indices''' , config.out_indices )
__a = TimmBackboneConfig(
backbone=_a , num_channels=_a , features_only=_a , use_pretrained_backbone=_a , out_indices=_a , )
return super()._from_config(_a , **_a )
def __UpperCAmelCase ( self , _a ):
pass
def __UpperCAmelCase ( self , _a , _a=None , _a=None , _a=None , **_a ):
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__a = self._all_layers
__a = self._backbone(_a , **_a )
__a = self._return_layers
__a = tuple(hidden_states[i] for i in self.out_indices )
else:
__a = self._backbone(_a , **_a )
__a = None
__a = tuple(_a )
__a = tuple(_a ) if hidden_states is not None else None
if not return_dict:
__a = (feature_maps,)
if output_hidden_states:
__a = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=_a , hidden_states=_a , attentions=_a )
| 65 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 65 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a ):
__a = num_of_nodes
__a = []
__a = {}
def __UpperCAmelCase ( self , _a , _a , _a ):
self.m_edges.append([u_node, v_node, weight] )
def __UpperCAmelCase ( self , _a ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def __UpperCAmelCase ( self , _a ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
__a = self.find_component(_a )
def __UpperCAmelCase ( self , _a , _a , _a ):
if component_size[u_node] <= component_size[v_node]:
__a = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_a )
elif component_size[u_node] >= component_size[v_node]:
__a = self.find_component(_a )
component_size[u_node] += component_size[v_node]
self.set_component(_a )
def __UpperCAmelCase ( self ):
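        # Borůvka-style minimum spanning tree: while more than one component remains, add each component's minimum-weight outgoing edge and merge its endpoints.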
__a = []
__a = 0
__a = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__a = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__a , __a , __a = edge
__a = self.m_component[u]
__a = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__a = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_a , _a ):
__a , __a , __a = edge
__a = self.m_component[u]
__a = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_a , _a , _a )
print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
__a = [-1] * self.m_num_of_nodes
print(f'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def lowercase ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowercase_ = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowercase_ = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowercase_ = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __UpperCAmelCase ( self , _a , _a , _a=None , _a=1 , _a="binary" , _a=None ):
__a = fa_score(
_a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a )
return {"f1": float(_a ) if score.size == 1 else score}
| 65 | 1 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : list ) -> list:
if len(lowerCAmelCase__ ) < 2:
return collection
def circle_sort_util(lowerCAmelCase__ : list , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> bool:
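        # One circle-sort pass: swap out-of-order elements mirrored around the midpoint, recurse into both halves, and report whether any swap occurred.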
__a = False
if low == high:
return swapped
__a = low
__a = high
while left < right:
if collection[left] > collection[right]:
__a , __a = (
collection[right],
collection[left],
)
__a = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
__a , __a = (
collection[right + 1],
collection[left],
)
__a = True
__a = low + int((high - low) / 2 )
__a = circle_sort_util(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__a = circle_sort_util(lowerCAmelCase__ , mid + 1 , lowerCAmelCase__ )
return swapped or left_swap or right_swap
__a = True
while is_not_sorted is True:
__a = circle_sort_util(lowerCAmelCase__ , 0 , len(lowerCAmelCase__ ) - 1 )
return collection
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
lowercase_ = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 65 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = ['onnx']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls , *_a , **_a ):
requires_backends(cls , ['''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls , *_a , **_a ):
requires_backends(cls , ['''onnx'''] )
| 65 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
lowercase_ = {
"roberta-base": 5_1_2,
"roberta-large": 5_1_2,
"roberta-large-mnli": 5_1_2,
"distilroberta-base": 5_1_2,
"roberta-base-openai-detector": 5_1_2,
"roberta-large-openai-detector": 5_1_2,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Dict = ['input_ids', 'attention_mask']
__UpperCAmelCase : Any = RobertaTokenizer
def __init__( self , _a=None , _a=None , _a=None , _a="replace" , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=False , _a=True , **_a , ):
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
__a = getattr(_a , pre_tok_state.pop('''type''' ) )
__a = add_prefix_space
__a = pre_tok_class(**_a )
__a = add_prefix_space
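        # Keep the serialized post-processor in sync: if add_prefix_space or trim_offsets differ from the stored state, rebuild it with the requested values.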
__a = '''post_processor'''
__a = getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
__a = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__a = tuple(state['''sep'''] )
if "cls" in state:
__a = tuple(state['''cls'''] )
__a = False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
__a = add_prefix_space
__a = True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
__a = trim_offsets
__a = True
if changes_to_apply:
__a = getattr(_a , state.pop('''type''' ) )
__a = component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
def __UpperCAmelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self , _a ):
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
__a = value
def __UpperCAmelCase ( self , *_a , **_a ):
__a = kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
__a = kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def __UpperCAmelCase ( self , _a , _a = None ):
__a = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCAmelCase ( self , _a , _a=None ):
__a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 65 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowercase_ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=16 , _a=13 , _a=7 , _a=14 , _a=10 , _a=19 , _a=5 , _a=4 , _a=True , _a=16 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=[1, 2, 3, 4, 5] , _a=25 , _a=5 , ):
__a = d_model
__a = parent
__a = batch_size
__a = prediction_length
__a = context_length
__a = cardinality
__a = num_time_features
__a = lags_sequence
__a = embedding_dimension
__a = is_training
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = context_length
__a = prediction_length + label_length
__a = label_length
__a = moving_average
__a = autocorrelation_factor
def __UpperCAmelCase ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __UpperCAmelCase ( self , _a ):
__a = config.context_length + max(config.lags_sequence )
__a = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__a = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__a = floats_tensor([self.batch_size, _past_length] )
__a = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__a = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__a = floats_tensor([self.batch_size, config.prediction_length] )
__a = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def __UpperCAmelCase ( self ):
__a = self.get_config()
__a = self.prepare_autoformer_inputs_dict(_a )
return config, inputs_dict
def __UpperCAmelCase ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self , _a , _a ):
__a = AutoformerModel(config=_a ).to(_a ).eval()
__a = model(**_a )
__a = outputs.encoder_last_hidden_state
__a = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__a = model.get_encoder()
encoder.save_pretrained(_a )
__a = AutoformerEncoder.from_pretrained(_a ).to(_a )
__a , __a , __a , __a , __a = model.create_network_inputs(**_a )
__a , __a = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__a = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__a = encoder(inputs_embeds=_a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__a = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__a = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__a = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__a = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__a = model.get_decoder()
decoder.save_pretrained(_a )
__a = AutoformerDecoder.from_pretrained(_a ).to(_a )
__a = decoder(
trend=_a , inputs_embeds=_a , encoder_hidden_states=_a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__UpperCAmelCase : int = (AutoformerForPrediction,) if is_torch_available() else ()
__UpperCAmelCase : Any = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
__UpperCAmelCase : int = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : int = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : int = False
def __UpperCAmelCase ( self ):
__a = AutoformerModelTester(self )
__a = ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__a = model_class(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a )
__a , __a = model_class.from_pretrained(_a , output_loading_info=_a )
self.assertEqual(info['''missing_keys'''] , [] )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_a )
@unittest.skip(reason='''Model has no token embeddings''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a = inspect.signature(getattr(_a , '''forward''' ) )
# The main input is the name of the argument after `self`
__a = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , _a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(_a )] , _a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
__a = getattr(self.model_tester , '''seq_length''' , _a )
__a = getattr(self.model_tester , '''decoder_seq_length''' , _a )
__a = getattr(self.model_tester , '''encoder_seq_length''' , _a )
__a = getattr(self.model_tester , '''d_model''' , _a )
__a = getattr(self.model_tester , '''num_attention_heads''' , _a )
__a = d_model // num_attention_heads
for model_class in self.all_model_classes:
__a = True
__a = False
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.encoder_attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__a = len(_a )
__a = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(_a , _a )
# decoder attentions
__a = outputs.decoder_attentions
self.assertIsInstance(_a , (list, tuple) )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__a = outputs.cross_attentions
self.assertIsInstance(_a , (list, tuple) )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__a = True
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + 2 , len(_a ) )
__a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __UpperCAmelCase ( self ):
super().test_retain_grad_hidden_states_attentions()
def lowercase ( lowerCAmelCase__ : Optional[int]="train-batch.pt" ) -> List[str]:
__a = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=lowerCAmelCase__ , repo_type='''dataset''' )
__a = torch.load(lowerCAmelCase__ , map_location=lowerCAmelCase__ )
return batch
@require_torch
@slow
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_a )
__a = prepare_batch()
with torch.no_grad():
__a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
__a = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=_a )
self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) )
def __UpperCAmelCase ( self ):
__a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_a )
__a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
__a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
__a = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=_a )
self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) )
def __UpperCAmelCase ( self ):
__a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_a )
__a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
__a = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
__a = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , _a )
__a = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=_a )
__a = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _a , rtol=1E-1 ) )
| 65 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'glpn'
def __init__( self , _a=3 , _a=4 , _a=[2, 2, 2, 2] , _a=[8, 4, 2, 1] , _a=[32, 64, 160, 256] , _a=[7, 3, 3, 3] , _a=[4, 2, 2, 2] , _a=[1, 2, 5, 8] , _a=[4, 4, 4, 4] , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=0.1 , _a=1E-6 , _a=64 , _a=10 , _a=-1 , **_a , ):
super().__init__(**_a )
__a = num_channels
__a = num_encoder_blocks
__a = depths
__a = sr_ratios
__a = hidden_sizes
__a = patch_sizes
__a = strides
__a = mlp_ratios
__a = num_attention_heads
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = drop_path_rate
__a = layer_norm_eps
__a = decoder_hidden_size
__a = max_depth
__a = head_in_index
| 65 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] ) -> Any:
if isinstance(lowerCAmelCase__ , torch.Tensor ):
return image
elif isinstance(lowerCAmelCase__ , PIL.Image.Image ):
__a = [image]
if isinstance(image[0] , PIL.Image.Image ):
__a = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__a = np.concatenate(lowerCAmelCase__ , axis=0 )
__a = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_55.0
__a = image.transpose(0 , 3 , 1 , 2 )
__a = 2.0 * image - 1.0
__a = torch.from_numpy(lowerCAmelCase__ )
elif isinstance(image[0] , torch.Tensor ):
__a = torch.cat(lowerCAmelCase__ , dim=0 )
return image
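# Spherical linear interpolation (slerp) between two embedding tensors:
# interpolates along the great-circle arc between the inputs, falling back to
# plain linear interpolation when they are nearly parallel (|dot| > DOT_THRESHOLD).
# Torch tensors are moved to NumPy and back to the original device at the end.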
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]=0.99_95 ) -> int:
if not isinstance(lowerCAmelCase__ , np.ndarray ):
__a = True
__a = va.device
__a = va.cpu().numpy()
__a = va.cpu().numpy()
__a = np.sum(va * va / (np.linalg.norm(lowerCAmelCase__ ) * np.linalg.norm(lowerCAmelCase__ )) )
if np.abs(lowerCAmelCase__ ) > DOT_THRESHOLD:
__a = (1 - t) * va + t * va
else:
__a = np.arccos(lowerCAmelCase__ )
__a = np.sin(lowerCAmelCase__ )
__a = theta_a * t
__a = np.sin(lowerCAmelCase__ )
__a = np.sin(theta_a - theta_t ) / sin_theta_a
__a = sin_theta_t / sin_theta_a
__a = sa * va + sa * va
if inputs_are_torch:
__a = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
return va
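# Squared geodesic ("spherical") distance between L2-normalized embeddings;
# used below as the CLIP guidance loss.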
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] ) -> int:
__a = F.normalize(lowerCAmelCase__ , dim=-1 )
__a = F.normalize(lowerCAmelCase__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ) -> List[str]:
for param in model.parameters():
__a = value
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a=None , _a=None , _a=None , ):
super().__init__()
self.register_modules(
vae=_a , text_encoder=_a , clip_model=_a , tokenizer=_a , unet=_a , scheduler=_a , feature_extractor=_a , coca_model=_a , coca_tokenizer=_a , coca_transform=_a , )
__a = (
feature_extractor.size
if isinstance(feature_extractor.size , _a )
else feature_extractor.size['''shortest_edge''']
)
__a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _a )
set_requires_grad(self.clip_model , _a )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.vae , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.vae , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.unet , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.unet , _a )
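# Truncates the scheduler timesteps according to `strength`, img2img-style:
# strength 1.0 keeps all inference steps, smaller values skip the earliest ones.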
def __UpperCAmelCase ( self , _a , _a , _a ):
# get the original timestep using init_timestep
__a = min(int(num_inference_steps * strength ) , _a )
__a = max(num_inference_steps - init_timestep , 0 )
__a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a=None ):
if not isinstance(_a , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(_a )}''' )
__a = image.to(device=_a , dtype=_a )
if isinstance(_a , _a ):
__a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_a )
]
__a = torch.cat(_a , dim=0 )
else:
__a = self.vae.encode(_a ).latent_dist.sample(_a )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 0.1_8215 * init_latents
__a = init_latents.repeat_interleave(_a , dim=0 )
__a = randn_tensor(init_latents.shape , generator=_a , device=_a , dtype=_a )
# get latents
__a = self.scheduler.add_noise(_a , _a , _a )
__a = init_latents
return latents
def __UpperCAmelCase ( self , _a ):
__a = self.coca_transform(_a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def __UpperCAmelCase ( self , _a , _a ):
__a = self.feature_extractor.preprocess(_a )
__a = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
__a = self.clip_model.get_image_features(_a )
__a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
__a = image_embeddings_clip.repeat_interleave(_a , dim=0 )
return image_embeddings_clip
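# CLIP guidance step: decode the current latents to an image, embed it with the
# CLIP image encoder, and nudge the noise prediction along the gradient of the
# spherical distance between that embedding and the target CLIP embedding.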
@torch.enable_grad()
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , ):
__a = latents.detach().requires_grad_()
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a , encoder_hidden_states=_a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__a = self.scheduler.alphas_cumprod[timestep]
__a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__a = torch.sqrt(_a )
__a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _a ):
__a = self.scheduler.sigmas[index]
__a = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 1 / 0.1_8215 * sample
__a = self.vae.decode(_a ).sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = transforms.Resize(self.feature_extractor_size )(_a )
__a = self.normalize(_a ).to(latents.dtype )
__a = self.clip_model.get_image_features(_a )
__a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
__a = spherical_dist_loss(_a , _a ).mean() * clip_guidance_scale
__a = -torch.autograd.grad(_a , _a )[0]
if isinstance(self.scheduler , _a ):
__a = latents.detach() + grads * (sigma**2)
__a = noise_pred_original
else:
__a = noise_pred_original - torch.sqrt(_a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , _a , _a , _a = None , _a = None , _a = 512 , _a = 512 , _a = 0.6 , _a = 50 , _a = 7.5 , _a = 1 , _a = 0.0 , _a = 100 , _a = None , _a = "pil" , _a = True , _a = 0.8 , _a = 0.1 , _a = 0.1 , ):
if isinstance(_a , _a ) and len(_a ) != batch_size:
raise ValueError(f'''You have passed batch_size={batch_size}, but only {len(_a )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(_a , torch.Generator ) and batch_size > 1:
__a = [generator] + [None] * (batch_size - 1)
__a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
__a = [x[0] for x in coca_is_none if x[1]]
__a = ''', '''.join(_a )
# generate prompts with the CoCa model if prompt is None
if content_prompt is None:
if len(_a ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''' )
__a = self.get_image_description(_a )
if style_prompt is None:
if len(_a ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''' )
__a = self.get_image_description(_a )
# get prompt text embeddings for content and style
__a = self.tokenizer(
_a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
__a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__a = self.tokenizer(
_a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
__a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__a = slerp(_a , _a , _a )
# duplicate text embeddings for each generation per prompt
__a = text_embeddings.repeat_interleave(_a , dim=0 )
# set timesteps
__a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__a = {}
if accepts_offset:
__a = 1
self.scheduler.set_timesteps(_a , **_a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
__a , __a = self.get_timesteps(_a , _a , self.device )
__a = timesteps[:1].repeat(_a )
# Preprocess image
__a = preprocess(_a , _a , _a )
__a = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
__a = preprocess(_a , _a , _a )
__a = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
__a = slerp(_a , _a , _a )
if clip_guidance_scale > 0:
__a = self.get_clip_image_embeddings(_a , _a )
__a = self.get_clip_image_embeddings(_a , _a )
__a = slerp(
_a , _a , _a )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a = content_text_input.input_ids.shape[-1]
__a = self.tokenizer([''''''] , padding='''max_length''' , max_length=_a , return_tensors='''pt''' )
__a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__a = uncond_embeddings.repeat_interleave(_a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated on the target device
# for 1-to-1 reproducibility of results with the CompVis implementation.
# However, this currently doesn't work on `mps`.
__a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__a = torch.randn(_a , generator=_a , device='''cpu''' , dtype=_a ).to(
self.device )
else:
__a = torch.randn(_a , generator=_a , device=self.device , dtype=_a )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
# check if the scheduler accepts generator
__a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__a = generator
with self.progress_bar(total=_a ):
for i, t in enumerate(_a ):
# expand the latents if we are doing classifier free guidance
__a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a , encoder_hidden_states=_a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__a , __a = noise_pred.chunk(2 )
__a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__a , __a = self.cond_fn(
_a , _a , _a , _a , _a , _a , _a , )
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 1 / 0.1_8215 * latents
__a = self.vae.decode(_a ).sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a )
| 65 | 1 |
"""simple docstring"""
import os
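# Finds the greatest product of four adjacent numbers (right, down, and both
# diagonals) in a 20x20 grid read from grid.txt (the classic Project Euler
# problem 11 setup).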
def lowercase ( ) -> Optional[Any]:
with open(os.path.dirname(lowerCAmelCase__ ) + '''/grid.txt''' ) as f:
__a = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowerCAmelCase__ ) for x in f.readline().split()] )
__a = 0
# right
for i in range(20 ):
for j in range(17 ):
__a = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
__a = temp
# down
for i in range(17 ):
for j in range(20 ):
__a = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
__a = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
__a = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
__a = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
__a = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
__a = temp
return maximum
if __name__ == "__main__":
print(solution())
| 65 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowercase_ = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowercase ( lowerCAmelCase__ : List[Any] ) -> str:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : int ) -> Union[str, Any]:
from transformers.testing_utils import pytest_terminal_summary_main
__a = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(lowerCAmelCase__ , id=lowerCAmelCase__ )
| 65 | 1 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
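# Pads (or truncates) a batch of ragged per-example tensors to a fixed
# `sequence_length`, filling with `padding_value` on the given `padding_side`.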
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int ) -> Union[str, Any]:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = np.full((len(lowerCAmelCase__ ), sequence_length, 2) , lowerCAmelCase__ )
else:
__a = np.full((len(lowerCAmelCase__ ), sequence_length) , lowerCAmelCase__ )
for i, tensor in enumerate(lowerCAmelCase__ ):
if padding_side == "right":
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = tensor[:sequence_length]
else:
__a = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = tensor[:sequence_length]
else:
__a = tensor[:sequence_length]
return out_tensor.tolist()
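# Punctuation check: the ASCII symbol ranges plus any character whose Unicode
# category starts with "P", the same test used by BERT-style tokenizers.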
def lowercase ( lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
__a = ord(lowerCAmelCase__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
__a = unicodedata.category(lowerCAmelCase__ )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : PreTrainedTokenizerBase
__UpperCAmelCase : Union[bool, str, PaddingStrategy] = True
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : int = -1_0_0
__UpperCAmelCase : str = "pt"
def __UpperCAmelCase ( self , _a ):
import torch
__a = '''label''' if '''label''' in features[0].keys() else '''labels'''
__a = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
__a = self.tokenizer.pad(
_a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
__a = torch.tensor(batch['''entity_ids'''] ).shape[1]
__a = self.tokenizer.padding_side
if padding_side == "right":
__a = [
list(_a ) + [self.label_pad_token_id] * (sequence_length - len(_a )) for label in labels
]
else:
__a = [
[self.label_pad_token_id] * (sequence_length - len(_a )) + list(_a ) for label in labels
]
__a = [feature['''ner_tags'''] for feature in features]
__a = padding_tensor(_a , -1 , _a , _a )
__a = [feature['''original_entity_spans'''] for feature in features]
__a = padding_tensor(_a , (-1, -1) , _a , _a )
__a = {k: torch.tensor(_a , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 65 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase_ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__UpperCAmelCase : Any = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__UpperCAmelCase : Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__UpperCAmelCase : Dict = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = ZeroShotClassificationPipeline(
model=_a , tokenizer=_a , candidate_labels=['''politics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __UpperCAmelCase ( self , _a , _a ):
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
# No kwarg
__a = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__a = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
# https://github.com/huggingface/transformers/issues/13846
__a = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_a , [
{'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]}
for i in range(1 )
] , )
__a = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_a , [
{'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]}
for i in range(2 )
] , )
with self.assertRaises(_a ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(_a ):
classifier(_a , candidate_labels='''politics''' )
with self.assertRaises(_a ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(_a ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=_a )
with self.assertRaises(_a ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(_a ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=_a , )
self.run_entailment_id(_a )
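# Verifies that `entailment_id` is resolved from the model's label2id mapping:
# -1 when no entailment-like label exists, otherwise the index of the label
# whose (lowercased) name starts with "entail".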
def __UpperCAmelCase ( self , _a ):
__a = zero_shot_classifier.model.config
__a = config.labelaid
__a = zero_shot_classifier.entailment_id
__a = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
__a = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__a = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__a = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
__a = original_labelaid
self.assertEqual(_a , zero_shot_classifier.entailment_id )
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__a = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_a , )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def __UpperCAmelCase ( self ):
__a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__a = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_a , )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
| 65 | 1 |
"""simple docstring"""
import math
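# Segmented sieve of Eratosthenes: sieve the first sqrt(n) numbers directly,
# then use those primes to mark composites in successive windows of size
# sqrt(n), keeping memory proportional to sqrt(n) instead of n.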
def lowercase ( lowerCAmelCase__ : int ) -> list[int]:
__a = []
__a = 2
__a = int(math.sqrt(lowerCAmelCase__ ) ) # Size of every segment
__a = [True] * (end + 1)
__a = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCAmelCase__ )
for i in range(start * start , end + 1 , lowerCAmelCase__ ):
__a = False
start += 1
prime += in_prime
__a = end + 1
__a = min(2 * end , lowerCAmelCase__ )
while low <= n:
__a = [True] * (high - low + 1)
for each in in_prime:
__a = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCAmelCase__ , high + 1 , lowerCAmelCase__ ):
__a = False
for j in range(len(lowerCAmelCase__ ) ):
if temp[j] is True:
prime.append(j + low )
__a = high + 1
__a = min(high + end , lowerCAmelCase__ )
return prime
print(sieve(1_0**6))
| 65 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase ( lowerCAmelCase__ : Optional[int] ) -> int:
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def lowercase ( lowerCAmelCase__ : Any ) -> Any:
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a ):
__a = metric_id
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : Any = [MetricMock(__SCREAMING_SNAKE_CASE ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def __UpperCAmelCase ( self ):
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
'''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple ) -> Optional[int]:
if "tmp_path" in args:
__a = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(lowerCAmelCase__ , match='''https://huggingface.co/docs/evaluate''' ):
func(*lowerCAmelCase__ )
| 65 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase_ = 1_6
lowercase_ = 3_2
def lowercase ( lowerCAmelCase__ : Accelerator , lowerCAmelCase__ : int = 16 , lowerCAmelCase__ : str = "bert-base-cased" ) -> Dict:
__a = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
__a = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(lowerCAmelCase__ : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__a = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__a = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=lowerCAmelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__a = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCAmelCase__ : Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCAmelCase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(lowerCAmelCase__ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__a = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
__a = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
return train_dataloader, eval_dataloader
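# Full training loop. When a DeepSpeed config already defines an optimizer or
# scheduler, dummy placeholders are created instead so the DeepSpeed config
# stays the single source of truth for those components.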
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ) -> Any:
# Initialize accelerator
__a = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__a = config['''lr''']
__a = int(config['''num_epochs'''] )
__a = int(config['''seed'''] )
__a = int(config['''batch_size'''] )
__a = args.model_name_or_path
set_seed(lowerCAmelCase__ )
__a , __a = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__a = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
# Instantiate optimizer
__a = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__a = optimizer_cls(params=model.parameters() , lr=lowerCAmelCase__ )
if accelerator.state.deepspeed_plugin is not None:
__a = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__a = 1
__a = (len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__a = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase__ , num_warmup_steps=0 , num_training_steps=lowerCAmelCase__ , )
else:
__a = DummyScheduler(lowerCAmelCase__ , total_num_steps=lowerCAmelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember; we just need to unpack the objects in
# the same order we gave them to the prepare method.
__a , __a , __a , __a , __a = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# We need to keep track of how many total steps we have iterated over
__a = 0
# We also need to keep track of the stating epoch so files are named properly
__a = 0
# Now we train the model
__a = evaluate.load('''glue''' , '''mrpc''' )
__a = 0
__a = {}
for epoch in range(lowerCAmelCase__ , lowerCAmelCase__ ):
model.train()
for step, batch in enumerate(lowerCAmelCase__ ):
__a = model(**lowerCAmelCase__ )
__a = outputs.loss
__a = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
__a = 0
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__a = model(**lowerCAmelCase__ )
__a = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once than multiple times
__a , __a = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowerCAmelCase__ ) - 1:
__a = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__a = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )
__a = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , lowerCAmelCase__ )
__a = eval_metric['''accuracy''']
if best_performance < eval_metric["accuracy"]:
__a = eval_metric['''accuracy''']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( ) -> List[Any]:
__a = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=lowerCAmelCase__ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=lowerCAmelCase__ , )
parser.add_argument(
'''--output_dir''' , type=lowerCAmelCase__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--performance_lower_bound''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''Optional lower bound for the performance metric. If set, training will throw an error when the performance metric drops below this value.''' , )
parser.add_argument(
'''--num_epochs''' , type=lowerCAmelCase__ , default=3 , help='''Number of train epochs.''' , )
__a = parser.parse_args()
__a = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 65 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : int ) -> int:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or number < 0:
raise ValueError('''Input must be a non-negative integer''' )
__a = 0
while number:
# Brian Kernighan's trick: `number &= number - 1` clears the lowest set bit,
# so the loop runs once per set bit instead of once per bit position
# (at most the number of 1s, never 32 iterations)
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 1 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowercase_ , lowercase_ , lowercase_ = False, False, False
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = True
__UpperCAmelCase : Optional[str] = None
# Automatically constructed
__UpperCAmelCase : ClassVar[str] = "dict"
__UpperCAmelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
__UpperCAmelCase : str = field(default='Audio' , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __call__( self ):
return self.pa_type
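# Encodes one example into Arrow storage: accepts a file path (str), raw audio
# bytes, or a dict with "array"/"sampling_rate" (re-encoded to WAV bytes); raw
# PCM files additionally require an explicit sampling rate.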
def __UpperCAmelCase ( self , _a ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
if isinstance(_a , _a ):
return {"bytes": None, "path": value}
elif isinstance(_a , _a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__a = BytesIO()
sf.write(_a , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm''' ):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''' ) is None:
# To convert raw PCM bytes to WAV bytes, the sampling rate must be known
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in the Audio object''' )
if value.get('''bytes''' ):
# If we already have the PCM bytes, we don't need to read the file again (just use them directly)
__a = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
__a = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 32_767
__a = BytesIO(bytes() )
sf.write(_a , _a , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __UpperCAmelCase ( self , _a , _a = None ):
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
__a , __a = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
__a = xsplitext(_a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires the system library \'libsndfile\'>=1.0.31. '''
'''You can try updating the `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
if file is None:
__a = token_per_repo_id or {}
__a = path.split('''::''' )[-1]
try:
__a = string_to_dict(_a , config.HUB_DATASETS_URL )['''repo_id''']
__a = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__a = None
with xopen(_a , '''rb''' , use_auth_token=_a ) as f:
__a , __a = sf.read(_a )
else:
__a , __a = sf.read(_a )
__a = array.T
if self.mono:
__a = librosa.to_mono(_a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__a = librosa.resample(_a , orig_sr=_a , target_sr=self.sampling_rate )
__a = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __UpperCAmelCase ( self ):
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''' )
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
def __UpperCAmelCase ( self , _a ):
if pa.types.is_string(storage.type ):
__a = pa.array([None] * len(_a ) , type=pa.binary() )
__a = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__a = pa.array([None] * len(_a ) , type=pa.string() )
__a = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
__a = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
__a = storage.field('''bytes''' )
else:
__a = pa.array([None] * len(_a ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
__a = storage.field('''path''' )
else:
__a = pa.array([None] * len(_a ) , type=pa.string() )
__a = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
return array_cast(_a , self.pa_type )
def __UpperCAmelCase ( self , _a ):
@no_op_if_value_is_null
def path_to_bytes(_a ):
with xopen(_a , '''rb''' ) as f:
__a = f.read()
return bytes_
__a = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__a = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
__a = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(_a , self.pa_type )
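# Hedged usage sketch (not part of this module): in the public `datasets` API the
# feature above is exposed as `datasets.Audio`; casting a column to it routes reads
# through encode/decode paths like the ones defined here.
#   from datasets import Dataset, Audio
#   ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#   sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}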
| 65 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ) -> str:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Tuple:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict ) -> Optional[Any]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , split=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = text_path
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = [text_path]
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]=("train",) ) -> Optional[Any]:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
for split in splits:
__a = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ) -> Union[str, Any]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader({'''train''': text_path} , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> str:
__a = tmp_path / '''cache'''
    # the text reader exposes a single "text" column whose default dtype is "string";
    # the parametrized features above may cast it to another dtype
__a = {'''text''': '''string'''}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader({'''train''': text_path} , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ) -> Dict:
if split:
__a = {split: text_path}
else:
__a = '''train'''
__a = {'''train''': text_path, '''test''': text_path}
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
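# Hedged note: `TextDatasetReader` backs `load_dataset("text", ...)`, so an equivalent
# end-to-end call for the cases exercised above would be:
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "data.txt"}, split="train")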
| 65 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = ['input_features', 'is_longer']
def __init__( self , _a=64 , _a=48_000 , _a=480 , _a=10 , _a=1_024 , _a=0.0 , _a=False , _a = 0 , _a = 14_000 , _a = None , _a = "fusion" , _a = "repeatpad" , **_a , ):
super().__init__(
feature_size=_a , sampling_rate=_a , padding_value=_a , return_attention_mask=_a , **_a , )
__a = top_db
__a = truncation
__a = padding
__a = fft_window_size
__a = (fft_window_size >> 1) + 1
__a = hop_length
__a = max_length_s
__a = max_length_s * sampling_rate
__a = sampling_rate
__a = frequency_min
__a = frequency_max
__a = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_a , min_frequency=_a , max_frequency=_a , sampling_rate=_a , norm=_a , mel_scale='''htk''' , )
__a = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_a , min_frequency=_a , max_frequency=_a , sampling_rate=_a , norm='''slaney''' , mel_scale='''slaney''' , )
def __UpperCAmelCase ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self , _a , _a = None ):
__a = spectrogram(
_a , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_a , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__a = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__a = [0]
# randomly choose index for each part
__a = np.random.choice(ranges[0] )
__a = np.random.choice(ranges[1] )
__a = np.random.choice(ranges[2] )
__a = mel[idx_front : idx_front + chunk_frames, :]
__a = mel[idx_middle : idx_middle + chunk_frames, :]
__a = mel[idx_back : idx_back + chunk_frames, :]
__a = torch.tensor(mel[None, None, :] )
__a = torch.nn.functional.interpolate(
_a , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_a )
__a = mel_shrink[0][0].numpy()
__a = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__a = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__a = len(_a ) - max_length
__a = np.random.randint(0 , overflow + 1 )
__a = waveform[idx : idx + max_length]
__a = self._np_extract_fbank_features(_a , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__a = self._np_extract_fbank_features(_a , self.mel_filters )
__a = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__a = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__a = np.stack([mel, mel, mel, mel] , axis=0 )
__a = False
else:
__a = self._random_mel_fusion(_a , _a , _a )
__a = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
__a = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__a = int(max_length / len(_a ) )
__a = np.stack(np.tile(_a , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
__a = int(max_length / len(_a ) )
__a = np.stack(np.tile(_a , _a ) )
__a = np.pad(_a , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
__a = self._np_extract_fbank_features(_a , self.mel_filters )
__a = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
__a = self._np_extract_fbank_features(_a , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , **_a , ):
__a = truncation if truncation is not None else self.truncation
__a = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__a = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__a = is_batched_numpy or (
isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a = [np.asarray(_a , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a , np.ndarray ):
__a = np.asarray(_a , dtype=np.floataa )
elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
__a = [
self._get_input_mel(_a , max_length if max_length else self.nb_max_samples , _a , _a )
for waveform in raw_speech
]
__a = []
__a = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__a = np.random.randint(0 , len(_a ) )
__a = True
if isinstance(input_mel[0] , _a ):
__a = [np.asarray(_a , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
__a = [[longer] for longer in is_longer]
__a = {'''input_features''': input_mel, '''is_longer''': is_longer}
__a = BatchFeature(_a )
if return_tensors is not None:
__a = input_features.convert_to_tensors(_a )
return input_features
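# Hedged usage sketch (the concrete subclass name is an assumption; this mirrors the
# CLAP-style extractor above): a 48 kHz mono waveform shorter than `max_length_s` is
# repeat-padded, longer ones go through the 4-way "fusion" mel stack.
#   import numpy as np
#   fe = ClapFeatureExtractor()  # assumed name of the concrete class
#   out = fe(np.zeros(48_000, dtype=np.float32), sampling_rate=48_000, return_tensors="np")
#   out["input_features"].shape, out["is_longer"]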
| 65 |
"""simple docstring"""
def lowercase ( numa : int , numb : int ) -> bool:
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
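    # Hedged spot checks (illustrative values): the XOR of two ints is negative
    # exactly when their sign bits differ.
    assert lowercase(-5, 3) is True
    assert lowercase(5, 3) is False
    assert lowercase(-5, -3) is False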
| 65 | 1 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize ( example : dict ) -> dict:
    output = {}
    output['''input_ids'''] = tokenizer(example['''content'''] , truncation=False )['''input_ids''']
    output['''ratio_char_token'''] = len(example['''content'''] ) / len(output['''input_ids'''] )
    return output
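# Hedged illustration: a 1_200-character source file that tokenizes to 400 ids stores
# ratio_char_token == 3.0; downstream filtering can use this to drop near-binary or
# auto-generated files with unusually few characters per token.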
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 65 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SpeechTaTokenizer
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Dict = True
def __UpperCAmelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__a = SpeechTaTokenizer(_a )
__a = AddedToken('''<mask>''' , lstrip=_a , rstrip=_a )
__a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , _a ):
__a = '''this is a test'''
__a = '''this is a test'''
return input_text, output_text
def __UpperCAmelCase ( self , _a , _a=False , _a=20 , _a=5 ):
__a , __a = self.get_input_output_texts(_a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = tokenizer.decode(_a , clean_up_tokenization_spaces=_a )
return text, ids
def __UpperCAmelCase ( self ):
__a = '''<pad>'''
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __UpperCAmelCase ( self ):
__a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(_a ) , 81 )
def __UpperCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __UpperCAmelCase ( self ):
__a = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__a = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
__a = tokenizer.add_tokens(_a )
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size + len(_a ) )
__a = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__a = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
__a = tokenizer.add_special_tokens(_a )
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size_a + len(_a ) )
__a = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a = self.get_tokenizer()
__a = tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(_a , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
__a = tokenizer.convert_tokens_to_ids(_a )
# fmt: off
self.assertListEqual(_a , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
__a = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def __UpperCAmelCase ( self ):
# Use custom sequence because this tokenizer does not handle numbers.
__a = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
__a = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=_a , )
| 65 | 1 |
"""simple docstring"""
def lowercase ( x_points : list , y_points : list , xa : int ) -> list:
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
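    # Hedged example: the points below lie on the line y = x + 5, which the scheme
    # reproduces exactly at x = 5.
    print(lowercase([1, 2, 3, 4], [6, 7, 8, 9], 5)[0])  # -> 10.0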
| 65 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = ['input_features', 'attention_mask']
def __init__( self , _a=80 , _a=16_000 , _a=0.0 , _a=10 , _a=25 , _a="hamming_window" , _a=3_2768.0 , _a=0.97 , _a=1.0 , _a=True , _a=True , _a=False , **_a , ):
super().__init__(feature_size=_a , sampling_rate=_a , padding_value=_a , **_a )
__a = feature_size
__a = sampling_rate
__a = padding_value
__a = hop_length
__a = win_length
__a = frame_signal_scale
__a = preemphasis_coeff
__a = mel_floor
__a = normalize_means
__a = normalize_vars
__a = win_function
__a = return_attention_mask
__a = win_length * sampling_rate // 1_000
__a = hop_length * sampling_rate // 1_000
__a = optimal_fft_length(self.sample_size )
__a = (self.n_fft // 2) + 1
def __UpperCAmelCase ( self , _a ):
if self.win_function == "hamming_window":
__a = window_function(window_length=self.sample_size , name=self.win_function , periodic=_a )
else:
__a = window_function(window_length=self.sample_size , name=self.win_function )
__a = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__a = spectrogram(
one_waveform * self.frame_signal_scale , window=_a , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=_a , preemphasis=self.preemphasis_coeff , mel_filters=_a , mel_floor=self.mel_floor , log_mel='''log''' , )
return msfc_features.T
def __UpperCAmelCase ( self , _a , _a , _a ):
# make sure we normalize float32 arrays
if self.normalize_means:
__a = x[:input_length].mean(axis=0 )
__a = np.subtract(_a , _a )
if self.normalize_vars:
__a = x[:input_length].std(axis=0 )
__a = np.divide(_a , _a )
if input_length < x.shape[0]:
__a = padding_value
# make sure array is in float32
__a = x.astype(np.floataa )
return x
def __UpperCAmelCase ( self , _a , _a = None ):
__a = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(_a , _a , self.padding_value ) for x, n in zip(_a , _a )]
def __call__( self , _a , _a = False , _a = None , _a = False , _a = None , _a = None , _a = None , _a = None , **_a , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__a = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__a = is_batched_numpy or (
isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a = [np.asarray(_a , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a , np.ndarray ):
__a = np.asarray(_a , dtype=np.floataa )
elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a = [raw_speech]
# extract fbank features
__a = [self._extract_mfsc_features(_a ) for one_waveform in raw_speech]
# convert into correct format for padding
__a = BatchFeature({'''input_features''': features} )
__a = self.pad(
_a , padding=_a , max_length=_a , truncation=_a , pad_to_multiple_of=_a , return_attention_mask=_a , **_a , )
# make sure list is in array format
__a = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , _a ):
__a = [np.asarray(_a , dtype=np.floataa ) for feature in input_features]
__a = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__a = [np.asarray(_a , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__a = (
np.array(_a , dtype=np.intaa )
if self._get_padding_strategies(_a , max_length=_a ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__a = self.normalize(
padded_inputs['''input_features'''] , attention_mask=_a )
if return_tensors is not None:
__a = padded_inputs.convert_to_tensors(_a )
return padded_inputs
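# Hedged usage sketch (concrete names are assumptions; this mirrors the M-CTC-T-style
# extractor above): 16 kHz mono audio in, per-utterance mean/variance-normalized MFSC
# features plus an attention mask out.
#   import numpy as np
#   fe = MCTCTFeatureExtractor()  # assumed name of the concrete class
#   out = fe([np.zeros(16_000, dtype=np.float32)], sampling_rate=16_000, padding=True, return_tensors="np")
#   out["input_features"].shape, out["attention_mask"].shape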
| 65 | 1 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number ( phone : str ) -> bool:
    pattern = re.compile(
        r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
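    # A couple of hedged spot checks (numbers are illustrative, not real subscribers):
    assert is_sri_lankan_phone_number("+94773283048") is True
    assert is_sri_lankan_phone_number("94730000000") is False  # "73" is not an accepted operator prefix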
| 65 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=_a , )
assert hasattr(self , '''env''' )
def __UpperCAmelCase ( self , _a ):
# configuration for running training on smdistributed Model Parallel
__a = {
'''enabled''': True,
'''processes_per_host''': 8,
}
__a = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
__a = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
__a = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=_a , instance_type=self.instance_type , debugger_hook_config=_a , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 500,
} , metric_definitions=self.env.metric_definitions , distribution=_a , py_version='''py36''' , )
def __UpperCAmelCase ( self , _a ):
TrainingJobAnalytics(_a ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def __UpperCAmelCase ( self , _a ):
# create estimator
__a = self.create_estimator(_a )
# run training
estimator.fit()
# result dataframe
__a = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__a = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _a )
| 65 | 1 |
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers ( max_number : int ) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution ( base : int = 800800 , degree : int = 800800 ) -> int:
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
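    # Hedged sanity check on a tiny bound: below 1_000 the only hybrid integers are
    # 2**3 * 3**2 = 72 and 2**5 * 5**2 = 800, so the pair count is 2.
    assert solution(1_000, 1) == 2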
| 65 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a=None , **_a ):
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , _a , )
super().__init__(args=_a , **_a )
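# Hedged migration sketch: the shim above forwards everything to `Trainer`, so existing
# code such as
#   trainer = SageMakerTrainer(args=training_args, model=model, train_dataset=train_ds)
# can drop the subclass and call `Trainer(...)` with the same keyword arguments.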
| 65 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str = BlenderbotSmallConfig
__UpperCAmelCase : int = {}
__UpperCAmelCase : List[str] = 'gelu'
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a=0.1 , _a=0.1 , _a=20 , _a=2 , _a=1 , _a=0 , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = eos_token_id
__a = pad_token_id
__a = bos_token_id
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__a = tf.concat([input_ids, eos_tensor] , axis=1 )
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__a = prepare_blenderbot_small_inputs_dict(_a , _a , _a )
return config, inputs_dict
def __UpperCAmelCase ( self , _a , _a ):
__a = TFBlenderbotSmallModel(config=_a ).get_decoder()
__a = inputs_dict['''input_ids''']
__a = input_ids[:1, :]
__a = inputs_dict['''attention_mask'''][:1, :]
__a = inputs_dict['''head_mask''']
__a = 1
# first forward pass
__a = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
__a , __a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__a = tf.concat([input_ids, next_tokens] , axis=-1 )
__a = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__a = model(_a , attention_mask=_a )[0]
__a = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__a = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__a = output_from_no_past[:, -3:, random_slice_idx]
__a = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1E-3 )
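        # Descriptive note on the check above: decoding the 3 new tokens with
        # `past_key_values` must match, up to numerical tolerance, the logits from
        # re-running the full concatenated sequence without a cache.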
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : str=None , ) -> Dict:
if attention_mask is None:
__a = tf.cast(tf.math.not_equal(lowerCAmelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__a = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__a = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__UpperCAmelCase : str = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__UpperCAmelCase : Optional[Any] = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Any = True
__UpperCAmelCase : Any = False
__UpperCAmelCase : int = False
def __UpperCAmelCase ( self ):
__a = TFBlenderbotSmallModelTester(self )
__a = ConfigTester(self , config_class=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
@require_tokenizers
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
__UpperCAmelCase : int = 'facebook/blenderbot_small-90M'
@cached_property
def __UpperCAmelCase ( self ):
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
@cached_property
def __UpperCAmelCase ( self ):
__a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __UpperCAmelCase ( self ):
__a = self.tokenizer(self.src_text , return_tensors='''tf''' )
__a = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_a , )
__a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 65 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase_ = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
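# Hedged note: `_LazyModule` defers the imports declared in `_import_structure` until an
# attribute is first accessed, so `import transformers.models.gpt_neox` stays cheap even
# when the torch-backed modeling classes are available.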
| 65 | 1 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ) -> str:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Tuple:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict ) -> Optional[Any]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , split=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = text_path
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = [text_path]
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]=("train",) ) -> Optional[Any]:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
for split in splits:
__a = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ) -> Union[str, Any]:
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader({'''train''': text_path} , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> str:
__a = tmp_path / '''cache'''
    # the text reader exposes a single "text" column whose default dtype is "string";
    # the parametrized features above may cast it to another dtype
__a = {'''text''': '''string'''}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader({'''train''': text_path} , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ) -> Dict:
if split:
__a = {split: text_path}
else:
__a = '''train'''
__a = {'''train''': text_path, '''test''': text_path}
__a = tmp_path / '''cache'''
__a = {'''text''': '''string'''}
__a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 65 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=2 , _a=8 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=16 , _a=5 , _a=2 , _a=36 , _a="gelu" , _a=0.0 , _a=0.0 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self ):
__a = self.get_config()
__a = 300
return config
def __UpperCAmelCase ( self ):
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = self.prepare_config_and_inputs()
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = MraModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a )
__a = model(_a , token_type_ids=_a )
__a = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
__a = True
__a = MraModel(_a )
model.to(_a )
model.eval()
__a = model(
_a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
__a = model(
_a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , )
__a = model(_a , attention_mask=_a , token_type_ids=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = MraForMaskedLM(config=_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = MraForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
__a = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = MraForSequenceClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = MraForTokenClassification(config=_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_choices
__a = MraForMultipleChoice(config=_a )
model.to(_a )
model.eval()
__a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = config_and_inputs
__a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Dict = ()
def __UpperCAmelCase ( self ):
__a = MraModelTester(self )
__a = ConfigTester(self , config_class=_a , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a = type
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MraModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='''MRA does not output attentions''' )
def __UpperCAmelCase ( self ):
return
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
__a = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__a = model(_a )[0]
__a = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
__a = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__a = model(_a )[0]
__a = 50_265
__a = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
__a = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
__a = model(_a )[0]
__a = 50_265
__a = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
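# The integration tests above follow the standard regression pattern: run a pinned
# checkpoint on a fixed input and compare a small logits slice against frozen reference
# values within a tolerance. Illustration of the comparison step on dummy tensors (not
# actual MRA outputs):
#
# expected = torch.tensor([[[-0.0140, 0.0830, -0.0381]]])
# assert torch.allclose(expected, expected + 5e-5, atol=1e-4)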
| 65 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
lowercase_ = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
lowercase_ = {
"allenai/longformer-base-4096": 4_0_9_6,
"allenai/longformer-large-4096": 4_0_9_6,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_0_9_6,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_0_9_6,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase ( ) -> Union[str, Any]:
__a = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
__a = bs[:]
__a = 0
for b in range(2**8 ):
if b not in bs:
            bs.append(b )
cs.append(2**8 + n )
n += 1
    __a = [chr(n ) for n in cs]
return dict(zip(lowerCAmelCase__ , lowerCAmelCase__ ) )
def lowercase ( lowerCAmelCase__ : Tuple ) -> Tuple:
__a = set()
__a = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__a = char
return pairs
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , _a , _a , _a="replace" , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=False , **_a , ):
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
errors=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , **_a , )
with open(_a , encoding='''utf-8''' ) as vocab_handle:
__a = json.load(_a )
__a = {v: k for k, v in self.encoder.items()}
__a = errors # how to handle errors in decoding
__a = bytes_to_unicode()
__a = {v: k for k, v in self.byte_encoder.items()}
with open(_a , encoding='''utf-8''' ) as merges_handle:
__a = merges_handle.read().split('''\n''' )[1:-1]
__a = [tuple(merge.split() ) for merge in bpe_merges]
__a = dict(zip(_a , range(len(_a ) ) ) )
__a = {}
__a = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__a = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def __UpperCAmelCase ( self ):
return len(self.encoder )
def __UpperCAmelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCAmelCase ( self , _a ):
if token in self.cache:
return self.cache[token]
__a = tuple(_a )
__a = get_pairs(_a )
if not pairs:
return token
while True:
__a = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__a , __a = bigram
__a = []
__a = 0
while i < len(_a ):
try:
__a = word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__a = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__a = tuple(_a )
__a = new_word
if len(_a ) == 1:
break
else:
__a = get_pairs(_a )
__a = ''' '''.join(_a )
__a = word
return word
def __UpperCAmelCase ( self , _a ):
__a = []
for token in re.findall(self.pat , _a ):
__a = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(''' ''' ) )
return bpe_tokens
def __UpperCAmelCase ( self , _a ):
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def __UpperCAmelCase ( self , _a ):
return self.decoder.get(_a )
def __UpperCAmelCase ( self , _a ):
__a = ''''''.join(_a )
__a = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __UpperCAmelCase ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_a , ensure_ascii=_a ) + '''\n''' )
__a = 0
with open(_a , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _a : _a[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
__a = token_index
writer.write(''' '''.join(_a ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a=False , **_a ):
__a = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
__a = ''' ''' + text
return (text, kwargs)
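# Minimal runnable sketch of the two module-level helpers defined above, under the
# de-obfuscated names the tokenizer body calls (`bytes_to_unicode`, `get_pairs`); an
# illustration of the byte-level BPE plumbing, not a drop-in replacement.
def _bytes_to_unicode_sketch() -> dict:
    # Printable/latin bytes map to themselves; the remaining bytes are shifted past 0xFF.
    bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))
def _get_pairs_sketch(word: tuple) -> set:
    # Adjacent symbol pairs are what BPE ranks when choosing the next merge.
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}
assert len(_bytes_to_unicode_sketch()) == 256
assert _get_pairs_sketch(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}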
| 65 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> list[str]:
if partitions <= 0:
raise ValueError('''partitions must be a positive number!''' )
if partitions > number_of_bytes:
raise ValueError('''partitions can not > number_of_bytes!''' )
__a = number_of_bytes // partitions
__a = []
for i in range(lowerCAmelCase__ ):
__a = i * bytes_per_partition + 1
__a = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
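# Runnable sketch of the intended behavior once the obfuscated locals are restored:
# split `number_of_bytes` into `partitions` contiguous 1-based ranges, with the last
# partition absorbing any remainder.
def _allocation_sketch(number_of_bytes: int, partitions: int) -> list:
    per = number_of_bytes // partitions
    return [
        f"{i * per + 1}-{number_of_bytes if i == partitions - 1 else (i + 1) * per}"
        for i in range(partitions)
    ]
assert _allocation_sketch(16, 4) == ["1-4", "5-8", "9-12", "13-16"]
assert _allocation_sketch(22, 4) == ["1-5", "6-10", "11-15", "16-22"]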
| 65 | 1 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 'data2vec-audio'
def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=0.1 , _a=0.02 , _a=1E-5 , _a="gelu" , _a=(512, 512, 512, 512, 512, 512, 512) , _a=(5, 2, 2, 2, 2, 2, 2) , _a=(10, 3, 3, 3, 3, 2, 2) , _a=False , _a=16 , _a=19 , _a=5 , _a=0.05 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a="sum" , _a=False , _a=False , _a=256 , _a=(512, 512, 512, 512, 1_500) , _a=(5, 3, 3, 1, 1) , _a=(1, 2, 3, 1, 1) , _a=512 , _a=0 , _a=1 , _a=2 , _a=False , _a=3 , _a=2 , _a=3 , _a=None , **_a , ):
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
__a = hidden_size
__a = feat_extract_activation
__a = list(_a )
__a = list(_a )
__a = list(_a )
__a = conv_bias
__a = num_conv_pos_embeddings
__a = num_conv_pos_embedding_groups
__a = conv_pos_kernel_size
__a = len(self.conv_dim )
__a = num_hidden_layers
__a = intermediate_size
__a = hidden_act
__a = num_attention_heads
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = feat_proj_dropout
__a = final_dropout
__a = layerdrop
__a = layer_norm_eps
__a = initializer_range
__a = vocab_size
__a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a = mask_time_prob
__a = mask_time_length
__a = mask_time_min_masks
__a = mask_feature_prob
__a = mask_feature_length
__a = mask_feature_min_masks
# ctc loss
__a = ctc_loss_reduction
__a = ctc_zero_infinity
# adapter
__a = add_adapter
__a = adapter_kernel_size
__a = adapter_stride
__a = num_adapter_layers
__a = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__a = list(_a )
__a = list(_a )
__a = list(_a )
__a = xvector_output_dim
@property
def __UpperCAmelCase ( self ):
return math.prod(self.conv_stride )
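# Sanity check for the stride product returned above: with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2), each encoder output frame covers 5 * 2**6 == 320 input samples.
assert math.prod((5, 2, 2, 2, 2, 2, 2)) == 320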
| 65 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : int ) -> list:
__a = len(lowerCAmelCase__ )
__a = [[0] * n for i in range(lowerCAmelCase__ )]
for i in range(lowerCAmelCase__ ):
__a = y_points[i]
for i in range(2 , lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
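# Self-contained sketch with readable names: the routine above is Neville's iterated
# interpolation, where q[j][i] interpolates through points j-i+1 .. j and the value at
# `xa` ends up in q[n-1][n-1]. The test data below is linear (y = x + 5), so the
# interpolant at x = 5 must be exactly 10.
def _neville_sketch(x_points: list, y_points: list, xa: float) -> float:
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return q[n - 1][n - 1]
assert _neville_sketch([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5) == 10.0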
| 65 | 1 |
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int]="attention" ) -> Optional[Any]:
__a = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
__a = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
__a = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
__a = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple=False ) -> Dict:
if split_mlp_wi:
__a = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
__a = params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
__a = (wi_a, wi_a)
else:
__a = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
__a = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] ) -> Any:
return params[f'''{prefix}/layers_{i}/{layer_name}/scale''']
def lowercase ( lowerCAmelCase__ : dict , *, lowerCAmelCase__ : int , lowerCAmelCase__ : bool ) -> List[Any]:
__a = traverse_util.flatten_dict(variables['''target'''] )
    __a = {'''/'''.join(k ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__a = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , lowerCAmelCase__ )
__a = collections.OrderedDict()
# Shared embeddings.
__a = old['''token_embedder/embedding''']
# Encoder.
for i in range(lowerCAmelCase__ ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , '''encoder''' , '''pre_attention_layer_norm''' )
__a , __a , __a , __a = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , '''encoder''' , '''attention''' )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (MLP).
__a = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , '''encoder''' , '''pre_mlp_layer_norm''' )
__a , __a = tax_mlp_lookup(lowerCAmelCase__ , lowerCAmelCase__ , '''encoder''' , lowerCAmelCase__ )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old[
'''encoder/relpos_bias/rel_embedding'''
].T
__a = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase__ ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , '''decoder''' , '''pre_self_attention_layer_norm''' )
__a , __a , __a , __a = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , '''decoder''' , '''self_attention''' )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (Cross Attention).
__a = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , '''decoder''' , '''pre_cross_attention_layer_norm''' )
__a , __a , __a , __a = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , '''decoder''' , '''encoder_decoder_attention''' )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 2 (MLP).
__a = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , '''decoder''' , '''pre_mlp_layer_norm''' )
__a , __a = tax_mlp_lookup(lowerCAmelCase__ , lowerCAmelCase__ , '''decoder''' , lowerCAmelCase__ )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old['''decoder/decoder_norm/scale''']
__a = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__a = old['''decoder/logits_dense/kernel'''].T
return new
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : bool ) -> Optional[Any]:
__a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__a = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__a = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
__a = state_dict['''shared.weight''']
return state_dict
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] ) -> Union[str, Any]:
__a = checkpoints.load_tax_checkpoint(lowerCAmelCase__ )
__a = convert_tax_to_pytorch(lowerCAmelCase__ , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase__ )
__a = make_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : bool = False ) -> List[str]:
__a = TaConfig.from_json_file(lowerCAmelCase__ )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__a = TaEncoderModel(lowerCAmelCase__ )
else:
__a = TaForConditionalGeneration(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCAmelCase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase__ )
print('''Done''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
lowercase_ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
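# Hypothetical invocation of this conversion script (the script file name and all paths
# below are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output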
| 65 | 1 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> str:
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__a = str(bin(lowerCAmelCase__ ) )[2:] # remove the leading "0b"
__a = str(bin(lowerCAmelCase__ ) )[2:] # remove the leading "0b"
__a = max(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(lowerCAmelCase__ ) , b_binary.zfill(lowerCAmelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
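# Self-contained sketch of the intended behavior (both parameters of the function above
# were renamed to the same identifier, so this re-implements it with distinct names):
def _binary_xor_sketch(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_bin, b_bin = bin(a)[2:], bin(b)[2:]
    width = max(len(a_bin), len(b_bin))
    return "0b" + "".join(
        str(int(x != y)) for x, y in zip(a_bin.zfill(width), b_bin.zfill(width))
    )
assert _binary_xor_sketch(25, 32) == "0b111001"  # 011001 ^ 100000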
| 65 | 1 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase_ = 3
def lowercase ( lowerCAmelCase__ : int ) -> int:
print('''Generating primitive root of p''' )
while True:
__a = random.randrange(3 , lowerCAmelCase__ )
if pow(lowerCAmelCase__ , 2 , lowerCAmelCase__ ) == 1:
continue
if pow(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) == 1:
continue
return g
def lowercase ( lowerCAmelCase__ : int ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('''Generating prime p...''' )
__a = rabin_miller.generate_large_prime(lowerCAmelCase__ ) # select large prime number.
__a = primitive_root(lowerCAmelCase__ ) # one primitive root on modulo p.
__a = random.randrange(3 , lowerCAmelCase__ ) # private_key -> have to be greater than 2 for safety.
__a = cryptomath.find_mod_inverse(pow(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
__a = (key_size, e_a, e_a, p)
__a = (key_size, d)
return public_key, private_key
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : int ) -> None:
if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
print('''\nWARNING:''' )
print(
f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
__a , __a = generate_key(lowerCAmelCase__ )
print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(f'''{name}_pubkey.txt''' , '''w''' ) as fo:
fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
print(f'''Writing private key to file {name}_privkey.txt...''' )
with open(f'''{name}_privkey.txt''' , '''w''' ) as fo:
fo.write(f'''{private_key[0]},{private_key[1]}''' )
def lowercase ( ) -> None:
print('''Making key files...''' )
make_key_files('''elgamal''' , 2048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
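# For context, a textbook round trip over the key shape generated above, where the public
# key reads as (key_size, g, h, p) with h = (g^d)^(-1) mod p, so decryption multiplies by
# c1^d. This sketch is illustrative and not part of the original module.
def _elgamal_roundtrip_sketch(g: int, h: int, p: int, d: int, message: int) -> int:
    k = random.randrange(2, p - 1)        # ephemeral key
    c_1 = pow(g, k, p)                    # c1 = g^k mod p
    c_2 = (message * pow(h, k, p)) % p    # c2 = m * h^k mod p
    return (c_2 * pow(c_1, d, p)) % p     # m = c2 * c1^d mod p, since h^k * g^(k*d) == 1
# Toy parameters: p = 23 prime, g = 5 a primitive root mod 23, private exponent d = 7;
# h is derived as the Fermat inverse of g^d mod p.
assert _elgamal_roundtrip_sketch(5, pow(pow(5, 7, 23), 21, 23), 23, 7, 11) == 11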
| 65 |
"""simple docstring"""
import itertools
import math
def lowercase ( lowerCAmelCase__ : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase ( ) -> int:
__a = 2
while True:
if is_prime(lowerCAmelCase__ ):
yield num
num += 1
def lowercase ( lowerCAmelCase__ : int = 10001 ) -> int:
return next(itertools.islice(prime_generator() , nth - 1 , lowerCAmelCase__ ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
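# Self-contained check of the 6k +/- 1 trial-division idea used above (the generator and
# solution names were obfuscated away, so this re-derives the small cases directly):
def _nth_prime_sketch(nth: int) -> int:
    def _is_prime(n: int) -> bool:
        if 1 < n < 4:
            return True
        if n < 2 or n % 2 == 0 or n % 3 == 0:
            return False
        return all(n % i and n % (i + 2) for i in range(5, int(math.sqrt(n)) + 1, 6))
    gen = (n for n in itertools.count(2) if _is_prime(n))
    return next(itertools.islice(gen, nth - 1, nth))
assert _nth_prime_sketch(1) == 2
assert _nth_prime_sketch(6) == 13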
| 65 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = 'swin2sr'
__UpperCAmelCase : Tuple = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , _a=64 , _a=1 , _a=3 , _a=180 , _a=[6, 6, 6, 6, 6, 6] , _a=[6, 6, 6, 6, 6, 6] , _a=8 , _a=2.0 , _a=True , _a=0.0 , _a=0.0 , _a=0.1 , _a="gelu" , _a=False , _a=0.02 , _a=1E-5 , _a=2 , _a=1.0 , _a="1conv" , _a="pixelshuffle" , **_a , ):
super().__init__(**_a )
__a = image_size
__a = patch_size
__a = num_channels
__a = embed_dim
__a = depths
__a = len(_a )
__a = num_heads
__a = window_size
__a = mlp_ratio
__a = qkv_bias
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = drop_path_rate
__a = hidden_act
__a = use_absolute_embeddings
__a = layer_norm_eps
__a = initializer_range
__a = upscale
__a = img_range
__a = resi_connection
__a = upsampler
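# Hedged instantiation sketch: with the defaults above, the configuration describes a
# 2x classical super-resolution model (upscale=2, "pixelshuffle" upsampler) with six
# stages of depth 6. The class name below is assumed from the 'swin2sr' model_type.
#
# config = Swin2SRConfig()
# assert config.upscale == 2 and len(config.depths) == 6 and config.embed_dim == 180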
| 65 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
        from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
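# The structure above is the lazy-import pattern: every public name is declared up front
# in `_import_structure`, and `_LazyModule` resolves the submodule only on first attribute
# access. A standalone analogy using PEP 562 module-level __getattr__ (this illustrates
# the idea, not the actual _LazyModule implementation):
#
# import importlib
# _lazy = {"sqrt": "math"}
# def __getattr__(name):
#     if name in _lazy:
#         return getattr(importlib.import_module(_lazy[name]), name)
#     raise AttributeError(name)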
| 65 | 1 |
"""simple docstring"""
import random
def lowercase ( lowerCAmelCase__ : list , lowerCAmelCase__ : List[Any] ) -> tuple:
__a , __a , __a = [], [], []
for element in data:
if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
return less, equal, greater
def lowercase ( lowerCAmelCase__ : list , lowerCAmelCase__ : int ) -> Union[str, Any]:
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(lowerCAmelCase__ ) or index < 0:
return None
__a = items[random.randint(0 , len(lowerCAmelCase__ ) - 1 )]
__a = 0
__a , __a , __a = _partition(lowerCAmelCase__ , lowerCAmelCase__ )
__a = len(lowerCAmelCase__ )
__a = len(lowerCAmelCase__ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(lowerCAmelCase__ , lowerCAmelCase__ )
# must be in larger
else:
return quick_select(lowerCAmelCase__ , index - (m + count) )
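# Self-contained sketch of the intended quickselect (both helpers above were renamed to
# `lowercase` while their bodies still call `_partition`/`quick_select`, so this restores
# the algorithm under working names):
def _quick_select_sketch(items: list, index: int):
    if index < 0 or index >= len(items):
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    less = [e for e in items if e < pivot]
    equal = [e for e in items if e == pivot]
    greater = [e for e in items if e > pivot]
    m, count = len(less), len(equal)
    if m <= index < m + count:
        return pivot  # the index lands inside the run of pivot copies
    if index < m:
        return _quick_select_sketch(less, index)
    return _quick_select_sketch(greater, index - (m + count))
assert _quick_select_sketch([2, 4, 5, 7, 899, 54, 32], 5) == 54  # 6th smallest
assert _quick_select_sketch([5, 4, 3, 2], 2) == 4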
| 65 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowercase_ = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowercase_ = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowercase_ = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __UpperCAmelCase ( self , _a , _a , _a=None , _a=1 , _a="binary" , _a=None ):
__a = fa_score(
_a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a )
return {"f1": float(_a ) if score.size == 1 else score}
| 65 | 1 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
lowercase_ = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[Union[str, int]] = None
__UpperCAmelCase : Optional[Union[str, int]] = None
__UpperCAmelCase : Optional[Union[str, int]] = None
def __UpperCAmelCase ( self ):
__a , __a , __a = _str_to_version_tuple(self.version_str )
def __repr__( self ):
return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def __UpperCAmelCase ( self ):
return self.major, self.minor, self.patch
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , _a ):
return Version(_a )
elif isinstance(_a , _a ):
return other
raise TypeError(f'''{other} (type {type(_a )}) cannot be compared to version.''' )
def __eq__( self , _a ):
try:
__a = self._validate_operand(_a )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , _a ):
__a = self._validate_operand(_a )
return self.tuple < other.tuple
def __hash__( self ):
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def __UpperCAmelCase ( cls , _a ):
__a = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def __UpperCAmelCase ( self ):
return self.version_str
def lowercase ( lowerCAmelCase__ : Union[str, Any] ) -> Any:
__a = _VERSION_REG.match(lowerCAmelCase__ )
if not res:
raise ValueError(f'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
    return tuple(int(v ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def lowercase ( lowerCAmelCase__ : Dict ) -> List[str]:
return ".".join(str(lowerCAmelCase__ ) for v in version_tuple )
| 65 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = ['onnx']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls , *_a , **_a ):
requires_backends(cls , ['''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls , *_a , **_a ):
requires_backends(cls , ['''onnx'''] )
| 65 | 1 |
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# A mock response for an HTTP head request to emulate server down
__a = mock.Mock()
__a = 500
__a = {}
__a = HTTPError
__a = {}
# Download this model to make sure it's in the cache.
__a = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=_a ) as mock_head:
__a = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __UpperCAmelCase ( self ):
# A mock response for an HTTP head request to emulate server down
__a = mock.Mock()
__a = 500
__a = {}
__a = HTTPError
__a = {}
# Download this model to make sure it's in the cache.
__a = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=_a ) as mock_head:
__a = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# This check we did call the fake head request
mock_head.assert_called()
def __UpperCAmelCase ( self ):
# This test is for deprecated behavior and can be removed in v5
try:
__a = tempfile.mktemp()
with open(_a , '''wb''' ) as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , _a )
__a = AlbertTokenizer.from_pretrained(_a )
finally:
os.remove(_a )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''' , '''wb''' ) as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , _a )
__a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def __UpperCAmelCase ( self ):
# This test is for deprecated behavior and can be removed in v5
__a = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def __UpperCAmelCase ( cls ):
__a = TOKEN
HfFolder.save_token(_a )
@classmethod
def __UpperCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def __UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
__a = os.path.join(_a , '''vocab.txt''' )
with open(_a , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__a = BertTokenizer(_a )
tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token )
__a = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a , repo_id='''test-tokenizer''' , push_to_hub=_a , use_auth_token=self._token )
__a = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
__a = os.path.join(_a , '''vocab.txt''' )
with open(_a , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__a = BertTokenizer(_a )
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token )
__a = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_a , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=_a , use_auth_token=self._token )
__a = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __UpperCAmelCase ( self ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__a = os.path.join(_a , '''vocab.txt''' )
with open(_a , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__a = CustomTokenizer(_a )
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
__a = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_a )
        # Can't do an isinstance check because CustomTokenizer is loaded from a dynamic module, so compare class names
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__a = os.path.join(_a , '''vocab.txt''' )
with open(_a , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__a = BertTokenizerFast.from_pretrained(_a )
bert_tokenizer.save_pretrained(_a )
__a = CustomTokenizerFast.from_pretrained(_a )
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
__a = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_a )
        # Can't do an isinstance check because CustomTokenizerFast is loaded from a dynamic module, so compare class names
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' )
__a = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=_a , trust_remote_code=_a )
        # Can't do an isinstance check because CustomTokenizer is loaded from a dynamic module, so compare class names
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def __UpperCAmelCase ( self ):
__a = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def __UpperCAmelCase ( self ):
__a = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] )
def __UpperCAmelCase ( self ):
__a = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def __UpperCAmelCase ( self ):
__a = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def __UpperCAmelCase ( self ):
__a = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] )
def __UpperCAmelCase ( self ):
__a = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] )
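    # Taken together, the two splits above pin down the matching policy:
    # splitting is greedy leftmost-longest ("AB" beats "B" in "ABC"), and once a
    # long token such as "ABC" is committed, overlapping candidates like "CD"
    # inside "ABCD" are not revisited.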
def __UpperCAmelCase ( self ):
        # Even if the offsets passed in are inconsistent, the returned string
        # parts must still concatenate back to the original text.
__a = Trie()
__a = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(_a , ['''AB''', '''C'''] )
| 65 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowercase_ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=16 , _a=13 , _a=7 , _a=14 , _a=10 , _a=19 , _a=5 , _a=4 , _a=True , _a=16 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=[1, 2, 3, 4, 5] , _a=25 , _a=5 , ):
__a = d_model
__a = parent
__a = batch_size
__a = prediction_length
__a = context_length
__a = cardinality
__a = num_time_features
__a = lags_sequence
__a = embedding_dimension
__a = is_training
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = context_length
__a = prediction_length + label_length
__a = label_length
__a = moving_average
__a = autocorrelation_factor
def __UpperCAmelCase ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __UpperCAmelCase ( self , _a ):
__a = config.context_length + max(config.lags_sequence )
__a = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__a = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__a = floats_tensor([self.batch_size, _past_length] )
__a = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__a = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__a = floats_tensor([self.batch_size, config.prediction_length] )
__a = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def __UpperCAmelCase ( self ):
__a = self.get_config()
__a = self.prepare_autoformer_inputs_dict(_a )
return config, inputs_dict
def __UpperCAmelCase ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self , _a , _a ):
__a = AutoformerModel(config=_a ).to(_a ).eval()
__a = model(**_a )
__a = outputs.encoder_last_hidden_state
__a = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__a = model.get_encoder()
encoder.save_pretrained(_a )
__a = AutoformerEncoder.from_pretrained(_a ).to(_a )
__a , __a , __a , __a , __a = model.create_network_inputs(**_a )
__a , __a = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__a = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__a = encoder(inputs_embeds=_a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__a = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__a = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__a = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__a = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__a = model.get_decoder()
decoder.save_pretrained(_a )
__a = AutoformerDecoder.from_pretrained(_a ).to(_a )
__a = decoder(
trend=_a , inputs_embeds=_a , encoder_hidden_states=_a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
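    # The round-trip checks above mirror Autoformer's series decomposition: the
    # encoder sees the context window concatenated with its features, while the
    # decoder receives a seasonal branch (the last `label_length` seasonal steps
    # padded with zeros over the prediction horizon) and a trend branch (the
    # last `label_length` trend steps padded with the context mean), each
    # concatenated with the features for the decoder window.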
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__UpperCAmelCase : int = (AutoformerForPrediction,) if is_torch_available() else ()
__UpperCAmelCase : Any = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
__UpperCAmelCase : int = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : int = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : int = False
def __UpperCAmelCase ( self ):
__a = AutoformerModelTester(self )
__a = ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__a = model_class(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a )
__a , __a = model_class.from_pretrained(_a , output_loading_info=_a )
self.assertEqual(info['''missing_keys'''] , [] )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_a )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a = inspect.signature(getattr(_a , '''forward''' ) )
# The main input is the name of the argument after `self`
__a = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , _a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(_a )] , _a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
__a = getattr(self.model_tester , '''seq_length''' , _a )
__a = getattr(self.model_tester , '''decoder_seq_length''' , _a )
__a = getattr(self.model_tester , '''encoder_seq_length''' , _a )
__a = getattr(self.model_tester , '''d_model''' , _a )
__a = getattr(self.model_tester , '''num_attention_heads''' , _a )
__a = d_model // num_attention_heads
for model_class in self.all_model_classes:
__a = True
__a = False
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.encoder_attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__a = len(_a )
__a = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(_a , _a )
# decoder attentions
__a = outputs.decoder_attentions
self.assertIsInstance(_a , (list, tuple) )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__a = outputs.cross_attentions
self.assertIsInstance(_a , (list, tuple) )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__a = True
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + 2 , len(_a ) )
__a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __UpperCAmelCase ( self ):
super().test_retain_grad_hidden_states_attentions()
def lowercase ( lowerCAmelCase__ : Optional[int]="train-batch.pt" ) -> List[str]:
__a = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=lowerCAmelCase__ , repo_type='''dataset''' )
    __a = torch.load(lowerCAmelCase__ , map_location=torch_device )
return batch
@require_torch
@slow
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_a )
__a = prepare_batch()
with torch.no_grad():
__a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
__a = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=_a )
self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) )
def __UpperCAmelCase ( self ):
__a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_a )
__a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
__a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
__a = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=_a )
self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) )
def __UpperCAmelCase ( self ):
__a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_a )
__a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
__a = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
__a = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , _a )
__a = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=_a )
__a = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _a , rtol=1E-1 ) )
| 65 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : int
__UpperCAmelCase : int
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a ):
__a = [[] for _ in range(_a )]
__a = size
def __getitem__( self , _a ):
return iter(self._graph[vertex] )
@property
def __UpperCAmelCase ( self ):
return self._size
def __UpperCAmelCase ( self , _a , _a , _a ):
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(_a , _a ) )
def __UpperCAmelCase ( self , _a , _a ):
__a = deque([start_vertex] )
__a = [None] * self.size
__a = 0
while queue:
__a = queue.popleft()
__a = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
__a = current_distance + edge.weight
__a = distances[edge.destination_vertex]
if (
isinstance(_a , _a )
and new_distance >= dest_vertex_distance
):
continue
__a = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
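# A self-contained sketch of the same 0-1 BFS idea with readable names; it
# mirrors the class above (weight-0 edges go to the front of the deque,
# weight-1 edges to the back) and is illustrative rather than part of the
# original module. It reuses the `deque` imported at the top of this file.
def zero_one_bfs(adj: list[list[tuple[int, int]]], start: int, finish: int) -> int:
    """`adj[v]` lists `(destination, weight)` pairs with weight in {0, 1}."""
    distances: list[int | None] = [None] * len(adj)
    distances[start] = 0
    queue = deque([start])
    while queue:
        vertex = queue.popleft()
        for dest, weight in adj[vertex]:
            new_distance = distances[vertex] + weight
            if distances[dest] is not None and new_distance >= distances[dest]:
                continue
            distances[dest] = new_distance
            if weight == 0:
                queue.appendleft(dest)  # keeps the deque ordered by distance
            else:
                queue.append(dest)
    if distances[finish] is None:
        raise ValueError("No path from start to finish.")
    return distances[finish]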
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] ) -> Any:
if isinstance(lowerCAmelCase__ , torch.Tensor ):
return image
elif isinstance(lowerCAmelCase__ , PIL.Image.Image ):
__a = [image]
if isinstance(image[0] , PIL.Image.Image ):
__a = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__a = np.concatenate(lowerCAmelCase__ , axis=0 )
__a = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_55.0
__a = image.transpose(0 , 3 , 1 , 2 )
__a = 2.0 * image - 1.0
__a = torch.from_numpy(lowerCAmelCase__ )
elif isinstance(image[0] , torch.Tensor ):
__a = torch.cat(lowerCAmelCase__ , dim=0 )
return image
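# The helper above (called `preprocess` at its use sites below) accepts a torch
# tensor, a PIL image, or a list of either, and returns an NCHW float tensor;
# PIL inputs are resized to (w, h) and rescaled from [0, 255] to [-1, 1], while
# tensor inputs are passed through (or concatenated) unchanged.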
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]=0.99_95 ) -> int:
if not isinstance(lowerCAmelCase__ , np.ndarray ):
__a = True
__a = va.device
__a = va.cpu().numpy()
__a = va.cpu().numpy()
__a = np.sum(va * va / (np.linalg.norm(lowerCAmelCase__ ) * np.linalg.norm(lowerCAmelCase__ )) )
if np.abs(lowerCAmelCase__ ) > DOT_THRESHOLD:
__a = (1 - t) * va + t * va
else:
__a = np.arccos(lowerCAmelCase__ )
__a = np.sin(lowerCAmelCase__ )
__a = theta_a * t
__a = np.sin(lowerCAmelCase__ )
__a = np.sin(theta_a - theta_t ) / sin_theta_a
__a = sin_theta_t / sin_theta_a
__a = sa * va + sa * va
if inputs_are_torch:
__a = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
return va
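# Behavioral sketch of the interpolation above (the call sites below refer to
# it as `slerp`); the sample values are illustrative:
#
#     va = torch.tensor([1.0, 0.0])
#     vb = torch.tensor([0.0, 1.0])
#     halfway = slerp(0.5, va, vb)  # follows the arc between va and vb
#
# When the inputs are nearly collinear (|cos| above the 0.9995 threshold), the
# code falls back to plain linear interpolation, avoiding division by a
# near-zero sin(theta).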
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] ) -> int:
__a = F.normalize(lowerCAmelCase__ , dim=-1 )
__a = F.normalize(lowerCAmelCase__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
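# The loss above (called `spherical_dist_loss` at its use site) first projects
# both inputs onto the unit sphere; for unit vectors, 2 * arcsin(|x - y| / 2)**2
# equals theta**2 / 2, so the result is proportional to the squared
# great-circle distance between the two embeddings.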
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ) -> List[str]:
for param in model.parameters():
__a = value
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a=None , _a=None , _a=None , ):
super().__init__()
self.register_modules(
vae=_a , text_encoder=_a , clip_model=_a , tokenizer=_a , unet=_a , scheduler=_a , feature_extractor=_a , coca_model=_a , coca_tokenizer=_a , coca_transform=_a , )
__a = (
feature_extractor.size
if isinstance(feature_extractor.size , _a )
else feature_extractor.size['''shortest_edge''']
)
__a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _a )
set_requires_grad(self.clip_model , _a )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.vae , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.vae , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.unet , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.unet , _a )
def __UpperCAmelCase ( self , _a , _a , _a ):
# get the original timestep using init_timestep
__a = min(int(num_inference_steps * strength ) , _a )
__a = max(num_inference_steps - init_timestep , 0 )
__a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a=None ):
if not isinstance(_a , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(_a )}''' )
__a = image.to(device=_a , dtype=_a )
if isinstance(_a , _a ):
__a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_a )
]
__a = torch.cat(_a , dim=0 )
else:
__a = self.vae.encode(_a ).latent_dist.sample(_a )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 0.1_8215 * init_latents
__a = init_latents.repeat_interleave(_a , dim=0 )
__a = randn_tensor(init_latents.shape , generator=_a , device=_a , dtype=_a )
# get latents
__a = self.scheduler.add_noise(_a , _a , _a )
__a = init_latents
return latents
def __UpperCAmelCase ( self , _a ):
__a = self.coca_transform(_a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def __UpperCAmelCase ( self , _a , _a ):
__a = self.feature_extractor.preprocess(_a )
__a = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
__a = self.clip_model.get_image_features(_a )
__a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
__a = image_embeddings_clip.repeat_interleave(_a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , ):
__a = latents.detach().requires_grad_()
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a , encoder_hidden_states=_a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__a = self.scheduler.alphas_cumprod[timestep]
__a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__a = torch.sqrt(_a )
__a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _a ):
__a = self.scheduler.sigmas[index]
__a = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 1 / 0.1_8215 * sample
__a = self.vae.decode(_a ).sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = transforms.Resize(self.feature_extractor_size )(_a )
__a = self.normalize(_a ).to(latents.dtype )
__a = self.clip_model.get_image_features(_a )
__a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
__a = spherical_dist_loss(_a , _a ).mean() * clip_guidance_scale
__a = -torch.autograd.grad(_a , _a )[0]
if isinstance(self.scheduler , _a ):
__a = latents.detach() + grads * (sigma**2)
__a = noise_pred_original
else:
__a = noise_pred_original - torch.sqrt(_a ) * grads
return noise_pred, latents
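    # In short, the guidance step above: (1) recovers a "predicted x_0" from the
    # current noisy latents under the active scheduler, (2) decodes it with the
    # VAE and embeds it with CLIP, and (3) shifts the noise prediction by the
    # gradient of the spherical distance to the target embedding, scaled by
    # clip_guidance_scale.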
@torch.no_grad()
def __call__( self , _a , _a , _a = None , _a = None , _a = 512 , _a = 512 , _a = 0.6 , _a = 50 , _a = 7.5 , _a = 1 , _a = 0.0 , _a = 100 , _a = None , _a = "pil" , _a = True , _a = 0.8 , _a = 0.1 , _a = 0.1 , ):
if isinstance(_a , _a ) and len(_a ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(_a )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(_a , torch.Generator ) and batch_size > 1:
__a = [generator] + [None] * (batch_size - 1)
__a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
__a = [x[0] for x in coca_is_none if x[1]]
__a = ''', '''.join(_a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_a ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__a = self.get_image_description(_a )
if style_prompt is None:
if len(_a ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__a = self.get_image_description(_a )
# get prompt text embeddings for content and style
__a = self.tokenizer(
_a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
__a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__a = self.tokenizer(
_a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
__a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__a = slerp(_a , _a , _a )
# duplicate text embeddings for each generation per prompt
__a = text_embeddings.repeat_interleave(_a , dim=0 )
# set timesteps
__a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__a = {}
if accepts_offset:
__a = 1
self.scheduler.set_timesteps(_a , **_a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__a , __a = self.get_timesteps(_a , _a , self.device )
__a = timesteps[:1].repeat(_a )
# Preprocess image
__a = preprocess(_a , _a , _a )
__a = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
__a = preprocess(_a , _a , _a )
__a = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
__a = slerp(_a , _a , _a )
if clip_guidance_scale > 0:
__a = self.get_clip_image_embeddings(_a , _a )
__a = self.get_clip_image_embeddings(_a , _a )
__a = slerp(
_a , _a , _a )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a = content_text_input.input_ids.shape[-1]
__a = self.tokenizer([''''''] , padding='''max_length''' , max_length=_a , return_tensors='''pt''' )
__a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__a = uncond_embeddings.repeat_interleave(_a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__a = torch.randn(_a , generator=_a , device='''cpu''' , dtype=_a ).to(
self.device )
else:
__a = torch.randn(_a , generator=_a , device=self.device , dtype=_a )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
# check if the scheduler accepts generator
__a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__a = generator
with self.progress_bar(total=_a ):
for i, t in enumerate(_a ):
# expand the latents if we are doing classifier free guidance
__a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a , encoder_hidden_states=_a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__a , __a = noise_pred.chunk(2 )
__a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__a , __a = self.cond_fn(
_a , _a , _a , _a , _a , _a , _a , )
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 1 / 0.1_8215 * latents
__a = self.vae.decode(_a ).sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a )
| 65 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = 'sequence-classification'
def __init__( self , _a ):
if type(_a ) == dict:
__a = Namespace(**_a )
__a = glue_output_modes[hparams.task]
__a = glue_tasks_num_labels[hparams.task]
super().__init__(_a , _a , self.mode )
def __UpperCAmelCase ( self , **_a ):
return self.model(**_a )
def __UpperCAmelCase ( self , _a , _a ):
__a = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__a = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
__a = self(**_a )
__a = outputs[0]
__a = self.trainer.lr_schedulers[0]['''scheduler''']
__a = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __UpperCAmelCase ( self ):
__a = self.hparams
__a = processors[args.task]()
__a = processor.get_labels()
for mode in ["train", "dev"]:
__a = self._feature_file(_a )
if os.path.exists(_a ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , _a )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
__a = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
__a = convert_examples_to_features(
_a , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , _a )
torch.save(_a , _a )
def __UpperCAmelCase ( self , _a , _a , _a = False ):
__a = '''dev''' if mode == '''test''' else mode
__a = self._feature_file(_a )
logger.info('''Loading features from cached file %s''' , _a )
__a = torch.load(_a )
__a = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__a = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__a = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__a = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__a = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(_a , _a , _a , _a ) , batch_size=_a , shuffle=_a , )
def __UpperCAmelCase ( self , _a , _a ):
__a = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__a = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
__a = self(**_a )
__a , __a = outputs[:2]
__a = logits.detach().cpu().numpy()
__a = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCAmelCase ( self , _a ):
__a = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
__a = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__a = np.argmax(_a , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__a = np.squeeze(_a )
__a = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
__a = [[] for _ in range(out_label_ids.shape[0] )]
__a = [[] for _ in range(out_label_ids.shape[0] )]
__a = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , _a , _a )}
__a = dict(results.items() )
__a = results
return ret, preds_list, out_label_list
def __UpperCAmelCase ( self , _a ):
__a , __a , __a = self._eval_end(_a )
__a = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCAmelCase ( self , _a ):
__a , __a , __a = self._eval_end(_a )
__a = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCAmelCase ( _a , _a ):
BaseTransformer.add_model_specific_args(_a , _a )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=_a , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=_a , required=_a , help='''The GLUE task to run''' , )
parser.add_argument(
            '''--gpus''' , default=0 , type=_a , help='''The number of GPUs to use; defaults to 0, meaning CPU only''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def lowercase ( ) -> List[Any]:
__a = argparse.ArgumentParser()
add_generic_args(lowerCAmelCase__ , os.getcwd() )
__a = GLUETransformer.add_model_specific_args(lowerCAmelCase__ , os.getcwd() )
__a = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__a = os.path.join(
'''./results''' , f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
__a = GLUETransformer(lowerCAmelCase__ )
__a = generic_train(lowerCAmelCase__ , lowerCAmelCase__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__a = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=lowerCAmelCase__ ) )
__a = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(lowerCAmelCase__ )
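# Illustrative invocation; the script name, paths, and checkpoint are
# placeholders, and --model_name_or_path / --output_dir / --data_dir /
# --do_predict are assumed to come from the shared lightning_base arguments:
#
#     python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#         --model_name_or_path bert-base-cased --max_seq_length 128 \
#         --output_dir ./results/mrpc --gpus 1 --do_predict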
if __name__ == "__main__":
main()
| 65 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowercase_ = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowercase ( lowerCAmelCase__ : List[Any] ) -> str:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : int ) -> Union[str, Any]:
from transformers.testing_utils import pytest_terminal_summary_main
__a = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(lowerCAmelCase__ , id=lowerCAmelCase__ )
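# Both helpers above are pytest hooks: the first is wired into option parsing
# (via pytest_addoption_shared) and the second into the end-of-run summary (via
# pytest_terminal_summary_main), gated on the --make-reports option it reads.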
| 65 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = 'open-llama'
def __init__( self , _a=100_000 , _a=4_096 , _a=11_008 , _a=32 , _a=32 , _a="silu" , _a=2_048 , _a=0.02 , _a=1E-6 , _a=True , _a=0 , _a=1 , _a=2 , _a=False , _a=True , _a=0.1 , _a=0.1 , _a=True , _a=True , _a=None , **_a , ):
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = intermediate_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = initializer_range
__a = rms_norm_eps
__a = use_cache
__a = kwargs.pop(
'''use_memorry_efficient_attention''' , _a )
__a = hidden_dropout_prob
__a = attention_dropout_prob
__a = use_stable_embedding
__a = shared_input_output_embedding
__a = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , tie_word_embeddings=_a , **_a , )
def __UpperCAmelCase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _a ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f'''got {self.rope_scaling}''' )
__a = self.rope_scaling.get('''type''' , _a )
__a = self.rope_scaling.get('''factor''' , _a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(_a , _a ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
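# Illustrative: per the validation above, a well-formed value is a two-key dict
# such as
#
#     rope_scaling = {"type": "linear", "factor": 2.0}
#
# where "type" is "linear" or "dynamic" and "factor" is a float strictly
# greater than 1.0.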
| 65 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase_ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__UpperCAmelCase : Any = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__UpperCAmelCase : Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__UpperCAmelCase : Dict = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = ZeroShotClassificationPipeline(
            model=_a , tokenizer=_a , candidate_labels=['''politics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __UpperCAmelCase ( self , _a , _a ):
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
# No kwarg
__a = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__a = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(_a , {'''sequence''': ANY(_a ), '''labels''': [ANY(_a )], '''scores''': [ANY(_a )]} )
# https://github.com/huggingface/transformers/issues/13846
__a = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_a , [
{'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]}
for i in range(1 )
] , )
__a = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_a , [
{'''sequence''': ANY(_a ), '''labels''': [ANY(_a ), ANY(_a )], '''scores''': [ANY(_a ), ANY(_a )]}
for i in range(2 )
] , )
with self.assertRaises(_a ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(_a ):
classifier(_a , candidate_labels='''politics''' )
with self.assertRaises(_a ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(_a ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=_a )
with self.assertRaises(_a ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(_a ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=_a , )
self.run_entailment_id(_a )
def __UpperCAmelCase ( self , _a ):
__a = zero_shot_classifier.model.config
__a = config.labelaid
__a = zero_shot_classifier.entailment_id
__a = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
__a = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__a = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__a = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
__a = original_labelaid
self.assertEqual(_a , zero_shot_classifier.entailment_id )
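    # The assertions above pin down the convention: a label whose name starts
    # with "entail" (case-insensitively) supplies the entailment id, and when
    # no such label exists the pipeline falls back to -1 (the last logit).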
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def __UpperCAmelCase ( self ):
__a = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def __UpperCAmelCase ( self ):
__a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__a = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_a , )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def __UpperCAmelCase ( self ):
__a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
__a = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__a = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_a , )
self.assertEqual(
nested_simplify(_a ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
| 65 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'fnet'
def __init__( self , _a=32_000 , _a=768 , _a=12 , _a=3_072 , _a="gelu_new" , _a=0.1 , _a=512 , _a=4 , _a=0.02 , _a=1E-12 , _a=False , _a=512 , _a=3 , _a=1 , _a=2 , **_a , ):
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_tpu_fourier_optimizations
__a = tpu_short_seq_length
| 65 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metrics_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
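# For reference, a hedged sketch of the migration path the warning above
# points users to: the separate `evaluate` package (pip install evaluate)
# replaces datasets.load_metric.
import evaluate

mse_metric = evaluate.load("mse")
print(mse_metric.compute(predictions=[2.5, 0.0], references=[3.0, -0.5]))  # {'mse': 0.25}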
| 65 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
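# A minimal sketch of what the lazy pattern above buys (illustrative; runs in
# user code rather than inside this module). Replacing sys.modules[__name__]
# with a _LazyModule means each submodule is imported only when one of its
# exported names is first accessed:
#
#     import transformers                   # cheap: TrOCR submodules not imported yet
#     cls = transformers.TrOCRForCausalLM   # _LazyModule.__getattr__ imports modeling_trocr now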
| 65 |
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """
    Count the number of set bits in a non-negative integer using Brian
    Kernighan's algorithm.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # number &= number - 1 clears the lowest set bit, so instead of
        # looping over every bit position and checking for 1s, the loop
        # runs only once per set bit (at most 32 times for a 32-bit int)
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
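    # A short trace of Kernighan's step used above, shown for 25 (0b11001):
    # each `n &= n - 1` clears exactly one set bit, so three iterations suffice.
    n = 0b11001
    while n:
        print(bin(n))  # 0b11001, 0b11000, 0b10000
        n &= n - 1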
| 65 | 1 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = JukeboxTokenizer
    metas = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''')
        tokens = tokenizer(**self.metas)['''input_ids''']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''')
        tokens = tokenizer(**self.metas)['''input_ids''']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
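    # Note on the fixtures above: the 5b checkpoint appears to reserve a
    # longer metadata prefix than the 1b one, so the shorter artist/genre
    # header is padded with -1 sentinel values, as visible when comparing
    # EXPECTED_OUTPUT here with the 1b test's [[0, 0, 0, 1_069, 11]].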
| 65 |
"""simple docstring"""
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
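# For context, a hedged sketch of the public API these reader tests back:
# loading newline-delimited text through load_dataset. The file path below
# is hypothetical.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("text", data_files={"train": "my_file.txt"})  # hypothetical path
    print(ds["train"].column_names)  # ["text"]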
| 65 | 1 |