| code (string, length 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, length 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import operator as op


def solve(post_fix):
    """Evaluate a postfix expression given as a list of tokens, printing each step."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # x is an operand: push it onto the stack
            stack.append(x)
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:  # x is an operator: pop two operands, apply, push the result
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix)) | 687 |
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        ) | 687 | 1 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase = 3 , _lowerCamelCase = 7 , _lowerCamelCase = 1000000 ) -> int:
'''simple docstring'''
_lowerCamelCase : Any = 0
_lowerCamelCase : Optional[int] = 1
for current_denominator in range(1 , limit + 1 ):
_lowerCamelCase : Dict = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
_lowerCamelCase : Optional[int] = current_numerator
_lowerCamelCase : Optional[Any] = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000)) | 386 |
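For small limits the result can be cross-checked by brute force with `fractions.Fraction` (a sketch for tiny limits only; the loop above is the scalable form):

# Brute-force cross-check, O(limit^2) candidate fractions -- illustration only.
from fractions import Fraction

def brute_force(numerator: int = 3, denominator: int = 7, limit: int = 8) -> int:
    target = Fraction(numerator, denominator)
    best = Fraction(0, 1)
    for d in range(1, limit + 1):
        for n in range(1, d):
            candidate = Fraction(n, d)
            if best < candidate < target:
                best = candidate
    return best.numerator

assert brute_force(limit=8) == solution(limit=8) == 2  # 2/5 is the best fraction below 3/7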
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # a module is a "leaf" if it has no submodules, or is a conv/batchnorm layer
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input.
        Under the hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


class FakeRegNetVisslWrapper(nn.Module):
    """Wraps a vissl RegNet trunk so its stage outputs can be collected with `get_trunk_forward_outputs`."""

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )


class NameToFromModelFuncMap(dict):
    """A dictionary with extra logic: unknown names fall back to the matching timm model factory."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    """A dictionary returning the correct Hugging Face RegNet class for a given checkpoint name."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val


def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
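A toy illustration of how `Tracker` and `ModuleTransfer` combine (assumed shapes, illustration only — not part of the conversion script): a forward pass traces the leaf layers of two structurally identical modules, and parameters are then copied layer by layer.

# Illustration with toy modules: after the transfer the conv weights match.
toy_src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
toy_dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
ModuleTransfer(src=toy_src, dest=toy_dest)(torch.randn(1, 3, 32, 32))
assert torch.equal(toy_src[0].weight, toy_dest[0].weight)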
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
            to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
            our_model.load_state_dict(to_state_dict)

        our_outputs = our_model(x, output_hidden_states=True)
        our_output = (
            our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
        )
        from_output = from_model(x)
        from_output = from_output[-1] if type(from_output) is list else from_output
        # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
        if "seer" in name and "in1k" in name:
            our_output = our_outputs.hidden_states[-1]

        assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
args = parser.parse_args()
pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 386 | 1 |
def actual_power(a: int, b: int):
    """Compute a**b for b >= 0 by recursive squaring."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
| 10 |
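Note that the recursion above calls `actual_power(a, b // 2)` twice instead of reusing the half-result, so it performs Θ(b) multiplications. A sketch of the standard O(log b) iterative form:

# Iterative exponentiation by squaring (sketch): O(log |b|) multiplications.
def fast_power(a: int, b: int):
    if b < 0:
        return 1 / fast_power(a, -b)
    result = 1
    while b:
        if b & 1:  # odd exponent: fold one factor of the base into the result
            result *= a
        a *= a  # square the base
        b >>= 1  # halve the exponent
    return result

assert fast_power(-2, -3) == power(-2, -3) == -0.125
assert fast_power(3, 10) == 59049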
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
 get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 270 | 0 |
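For comparison, the `accelerator.accumulate(model)` context in the example above automates the classic hand-rolled pattern below (a sketch reusing the names from `training_function`; Accelerate additionally skips gradient synchronization on the non-stepping micro-batches):

# Hand-rolled gradient accumulation (sketch) -- what `accelerator.accumulate` automates.
for step, batch in enumerate(train_dataloader):
    outputs = model(**batch)
    loss = outputs.loss / gradient_accumulation_steps  # average over micro-batches
    accelerator.backward(loss)
    if (step + 1) % gradient_accumulation_steps == 0:  # step only every N micro-batches
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()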
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
__a = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
__a = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=a )
for lyr_num, lyr in enumerate(model.encoders ):
__a = weights[F"layers_{lyr_num}"]
__a = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
__a = ly_weight["attention"]
__a = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder(weights, model):
__a = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
__a = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=a )
for lyr_num, lyr in enumerate(model.encoders ):
__a = weights[F"layers_{lyr_num}"]
__a = ly_weight["attention"]
__a = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__a = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__a = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder(weights, model):
__a = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
__a = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=a )
__a = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
__a = weights[F"layers_{lyr_num}"]
__a = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
__a = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
__a = ly_weight["self_attention"]
__a = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__a = ly_weight["MultiHeadDotProductAttention_0"]
__a = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__a = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__a = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__a = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
__a = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
SCREAMING_SNAKE_CASE__:Dict = parser.parse_args()
main(args)
| 67 | """simple docstring"""
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of a circular arc subtending `angle` degrees."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 67 | 1 |
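Quick check: a 90° arc of a circle with radius 10 is a quarter of the circumference, 2π·10/4 = 5π ≈ 15.708.

from math import isclose, pi

assert isclose(arc_length(90, 10), 5 * pi)  # quarter circumference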
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 6 |
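`_LazyModule` is an internal transformers helper; the underlying idea can be sketched with a PEP 562 module-level `__getattr__` (a simplified stand-in, not the real implementation):

# Simplified stand-in for the lazy-import pattern (PEP 562), not transformers'
# actual _LazyModule: heavy submodules are imported only on first attribute access.
import importlib

_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")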
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 30 | 0 |
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int):
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTree:
    def __init__(self, tree: Node):
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Sum the values of `node` and all of its descendants."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 706 |
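A minimal usage example for the classes above:

# Build the tree 1 -> (2, 3); the depth-first sum over all nodes is 6.
root = Node(1)
root.left = Node(2)
root.right = Node(3)
assert next(iter(BinaryTree(root))) == 6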
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 277 | 0 |
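The function encodes the mass action law n·p = n_i². For example, silicon at room temperature (n_i ≈ 1.5×10¹⁰ cm⁻³, a textbook value assumed here) doped to n = 10¹⁶ cm⁻³:

# Solve for the missing hole concentration: p = n_i**2 / n.
label, hole_conc = carrier_concentration(electron_conc=1e16, hole_conc=0, intrinsic_conc=1.5e10)
assert label == "hole_conc"
assert hole_conc == (1.5e10) ** 2 / 1e16  # 22500.0 holes per cm^3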
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
def lowercase__ ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : str , _lowercase : List[str] ):
# pipeline 1
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE__ : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : str = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(_lowercase , _lowercase )
def lowercase__ ( self : Any , _lowercase : Optional[Any] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Any ):
# pipeline 1
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : str = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : int = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , original_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(_lowercase , _lowercase )
def lowercase__ ( self : Tuple , _lowercase : str , _lowercase : Dict , _lowercase : Union[str, Any] , _lowercase : List[str] ):
# pipeline 1
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , mask_image=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : Tuple = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , mask_image=_lowercase , original_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(_lowercase , _lowercase )
def a ( ) -> Optional[int]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 35 |
from math import factorial
def solution(n: int = 20) -> int:
    """Return the number of lattice paths through an n x n grid: C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
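# Editor's note: quick sanity check of the closed form above. The answer for an
# n x n grid is the central binomial coefficient C(2n, n):
# >>> solution(2)   # C(4, 2)
# 6
# >>> solution(20)  # C(40, 20), the classic Project Euler 15 answer
# 137846528820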
| 35 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
return model
    @property
    def dummy_vae(self):
torch.manual_seed(0 )
        model = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
    @property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np", ).images
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
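    # Editor's note: the x4 upscaler always emits images at 4x the input resolution,
    # which is why the shape asserts above expect `low_res_image.size[0] * 4`.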
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy" )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy" )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 18 |
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_CITATION = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight )
        return {"f1": float(score) if score.size == 1 else score}
| 18 | 1 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """Count the square laminae (hollow square frames) buildable from up to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` (an expression in x) via x = x - f(x) / f'(x)."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
    # Find root of log(x) - 1 = 0 (the root is e, not a square root)
print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 70 | 1 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
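# Editor's note: under PyTorch weight normalization a weight is stored as
# w = g * v / ||v||, which is why the original checkpoint carries separate
# `weight_g` / `weight_v` tensors. Applying weight norm on the HF model before
# copying (and removing it afterwards) makes both parameter layouts match.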
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
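    # Editor's note: example invocation (file name and paths are hypothetical):
    #   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
    #       --pytorch_dump_folder_path ./speecht5_hifigan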
| 701 |
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
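# Editor's note: an illustrative key translation produced by the maps above
# (hypothetical key, shown for intuition only):
#   HF Diffusers  "down_blocks.0.resnets.0.norm1.weight"
#   SD            "input_blocks.1.0.in_layers.0.weight"
# The layer prefix comes from unet_conversion_map_layer, the suffix from
# unet_conversion_map_resnet.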
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
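# Editor's note: e.g. a [512, 512] HF linear weight becomes a [512, 512, 1, 1]
# 1x1-conv kernel in the SD layout:
# >>> reshape_weight_for_sd(torch.ones(512, 512)).shape
# torch.Size([512, 512, 1, 1])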
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase : Optional[Any] = {"""q""": 0, """k""": 1, """v""": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
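# Editor's note: the SD v2 (open-clip) text encoder stores attention as one fused
# in_proj tensor, so the three HF q/k/v projections (each [dim, dim]) are stacked
# with torch.cat into a single [3*dim, dim] weight, in q, k, v order per code2idx.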
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
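    # Editor's note: example invocation (file name and paths are hypothetical):
    #   python convert_diffusers_to_sd.py --model_path ./my-diffusers-model \
    #       --checkpoint_path ./model.ckpt --half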
| 425 | 0 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Map an LDM VAE state dict onto the Diffusers AutoencoderKL layout."""
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 641 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 641 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ))
        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ))
        torch.manual_seed(0)
        # editor's note: the True/True/False values below are reconstructed assumptions
        # for obfuscated boolean placeholders in this dump
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )
        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy")
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy")
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            input_image, "anime turtle", num_inference_steps=2, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
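    # Editor's note: `enable_sequential_cpu_offload` keeps each submodule on the GPU
    # only while it executes, which is how peak GPU memory stays under the 7 GB
    # budget asserted above.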
| 392 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
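# Editor's note: with this hook in place, per-run reports can be requested via e.g.
#   python -m pytest tests -k depth --make-reports=my_run
# (the flag name comes from the getoption call above; the report layout is defined
# by transformers.testing_utils).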
| 392 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 229 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
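# Editor's note: hashing the raw pixel buffer gives a cheap, deterministic image
# fingerprint for regression tests, e.g.
#   hashimage(Image.new("RGB", (2, 2)))  # returns a 32-char hex digest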
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
            ], outputs, )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
    def test_small_model_tf(self):
pass
@slow
@require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
@require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 229 | 1 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __magic_name__ ( unittest.TestCase):
    def _UpperCAmelCase ( self : Any ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" ,return_value=response_mock ) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
    def _UpperCAmelCase ( self : Optional[Any] ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" ,return_value=response_mock ) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def _UpperCAmelCase ( self : List[Any] ):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file ,"wb" ) as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,f )
            _ = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json" ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json" ,"wb" ) as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,f )
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size ,1_0_0_0 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json" )
def _UpperCAmelCase ( self : Any ):
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class __magic_name__ ( unittest.TestCase):
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _UpperCAmelCase ( cls : Dict ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def _UpperCAmelCase ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token ,repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
    def _UpperCAmelCase ( self : Optional[int] ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token ,repo_id="test-tokenizer" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir ,repo_id="test-tokenizer" ,push_to_hub=True ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )

    def _UpperCAmelCase ( self : Any ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=True ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )

    @require_tokenizers
    def _UpperCAmelCase ( self : Union[str, Any] ):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir )
            bert_tokenizer.save_pretrained(tmp_dir )
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
        tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" )
        tokenizer = AutoTokenizer.from_pretrained(
            f'''{USER}/test-dynamic-tokenizer''' ,use_fast=False ,trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
class __magic_name__ ( unittest.TestCase):
def _UpperCAmelCase ( self : Union[str, Any] ):
        trie = Trie()
        trie.add("Hello 友達" )
        self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
        trie.add("Hello" )
        self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def _UpperCAmelCase ( self : Optional[Any] ):
        trie = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] )
def _UpperCAmelCase ( self : int ):
        trie = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) ,["A", "BC"] )
self.assertEqual(trie.split("BCA" ) ,["BC", "A"] )
def _UpperCAmelCase ( self : int ):
        trie = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
def _UpperCAmelCase ( self : int ):
        trie = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
def _UpperCAmelCase ( self : List[Any] ):
        trie = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) ,["AB", "C"] )
def _UpperCAmelCase ( self : Optional[int] ):
        trie = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] )
def _UpperCAmelCase ( self : int ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
        trie = Trie()
        parts = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts ,["AB", "C"] )
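# An illustrative re-implementation of the add/split behaviour the tests above
# rely on (not the transformers.Trie class): `split` cuts text on the longest
# token added to the trie, scanning left to right.
def _trie_add(data: dict , word: str ) -> None:
    node = data
    for ch in word:
        node = node.setdefault(ch , {} )
    node[""] = 1 # end-of-token marker, mirroring the structure asserted above


def _trie_split(data: dict , text: str ) -> list:
    parts, start, i = [], 0, 0
    while i < len(text ):
        node, j, end = data, i, None
        while j < len(text ) and text[j] in node:
            node = node[text[j]]
            j += 1
            if "" in node:
                end = j # remember the longest match so far
        if end is None:
            i += 1
        else:
            if start < i:
                parts.append(text[start:i] )
            parts.append(text[i:end] )
            start = i = end
    if start < len(text ):
        parts.append(text[start:] )
    return parts


if __name__ == "__main__":
    _data: dict = {}
    for _token in ("[CLS]", "extra_id_1", "extra_id_100"):
        _trie_add(_data , _token )
    assert _trie_split(_data , "[CLS] This is a extra_id_100" ) == ["[CLS]", " This is a ", "extra_id_100"]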
| 405 |
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(
    input_str: str = "",
) -> bool:
    """simple docstring"""
    return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """simple docstring"""
    if len(input_str ) == 0:
        return True
    lower_case_input_str = input_str.replace(" " , "" ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """simple docstring"""
    print("\nFor string = " , input_str , ":" )
    print(
        "> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(input_str ) , "\ttime =" , timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
    print(
        "> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(input_str ) , "\ttime =" , timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
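    # Why the Counter check works: a string can be rearranged into a palindrome
    # iff at most one distinct character has an odd count (that character, if
    # any, takes the middle position). A small self-check:
    for example in ("aabbc", "aabb", "abc"):
        odd_counts = sum(count % 2 for count in Counter(example ).values() )
        print(f"{example}: {odd_counts} odd-count character(s) -> {odd_counts < 2}" )
    # aabbc: 1 -> True ('abcba'); aabb: 0 -> True ('abba'); abc: 3 -> False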
| 405 | 1 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase ) -> int:
    '''simple docstring'''
    if not isinstance(__UpperCAmelCase, int ):
        raise TypeError('''only integers accepted as input''' )
    else:
        num_string = str(abs(__UpperCAmelCase ) )
        # one copy of the digit list for every digit position
        num_transpositions = [list(num_string ) for _ in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )  # drop one digit from each copy
        return max(
            int(''''''.join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('doctest').testmod()
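# Worked example of the function above for 1234: dropping one digit in turn
# yields 234, 134, 124 and 123, so the result is 234. The same enumeration by hand:
if __name__ == "__main__":
    candidates = []
    for index in range(len("1234" ) ):
        digits = list("1234" )
        digits.pop(index )
        candidates.append(int("".join(digits ) ) )
    print(candidates , max(candidates ) ) # [234, 134, 124, 123] 234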
| 640 |
'''simple docstring'''
def bfs(graph , source , sink , parent ) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph )
    queue = []
    queue.append(source )
    visited[source] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph , source , sink ) -> int:
    '''simple docstring'''
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float('''Inf''' )
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
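# Because augmenting paths are found with BFS, this is the Edmonds-Karp
# variant of Ford-Fulkerson (O(V * E^2)). Note that ford_fulkerson mutates the
# capacity matrix in place (it becomes the residual graph), so run it on a
# copy when the original capacities are still needed. For this classic
# CLRS-style network the maximum flow is 23:
import copy

fresh_capacities = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(copy.deepcopy(fresh_capacities ) , 0 , 5 ) == 23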
| 640 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
__lowerCAmelCase = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
__lowerCAmelCase = {
'facebook/bart-base': 10_24,
'facebook/bart-large': 10_24,
'facebook/bart-large-mnli': 10_24,
'facebook/bart-large-cnn': 10_24,
'facebook/bart-large-xsum': 10_24,
'yjernite/bart_eli5': 10_24,
}
@lru_cache()
def bytes_to_unicode() -> dict:
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )


def get_pairs(word ) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
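# A quick sanity check of the two helpers above: bytes_to_unicode is a
# bijection from all 256 byte values to printable characters, which is what
# lets byte-level BPE treat arbitrary UTF-8 text as plain strings, and
# get_pairs enumerates the adjacent symbol pairs BPE considers for merging.
if __name__ == "__main__":
    _byte_encoder = bytes_to_unicode()
    _byte_decoder = {v: k for k, v in _byte_encoder.items()}
    _encoded = ''.join(_byte_encoder[b] for b in 'héllo'.encode('utf-8' ) )
    assert bytearray(_byte_decoder[c] for c in _encoded ).decode('utf-8' ) == 'héllo'
    assert len(_byte_encoder ) == 256
    assert get_pairs(tuple('hello' ) ) == {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}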
class __a ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ) -> None:
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.encoder )

    def get_vocab( self ) -> dict:
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ) -> str:
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens

    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.decoder.get(index )

    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        '''simple docstring'''
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
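# The heart of the `bpe` method above, restated as a toy standalone loop:
# repeatedly merge the best-ranked adjacent pair until no known merge applies.
# The merges table below is invented for illustration only.
def _toy_bpe(token , bpe_ranks ):
    word = tuple(token )
    while len(word ) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word ) - 1 )}
        bigram = min(pairs , key=lambda pair : bpe_ranks.get(pair , float('inf' ) ) )
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word ):
            if i < len(word ) - 1 and (word[i], word[i + 1]) == bigram:
                new_word.append(first + second )
                i += 2
            else:
                new_word.append(word[i] )
                i += 1
        word = tuple(new_word )
    return ''' '''.join(word )


if __name__ == "__main__":
    print(_toy_bpe('lower' , {('l', 'o'): 0, ('lo', 'w'): 1, ('e', 'r'): 2} ) ) # low er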
| 712 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class __a ( PerceiverImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 335 | 0 |
import operator
def strand_sort(arr: list , reverse: bool = False , solution: list | None = None ) -> list:
    '''simple docstring'''
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
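    # One pass of strand sort, traced by hand: the first pass pulls the
    # ascending "strand" [4, 5] out of [4, 3, 5, 1, 2] and recurses on [3, 1, 2].
    data = [4, 3, 5, 1, 2]
    strand = [data.pop(0 )] # [4]
    for i, item in enumerate(data ):
        if item > strand[-1]: # extend the ascending run: 4 -> 5
            strand.append(item )
            data.pop(i )
    assert strand == [4, 5] and data == [3, 1, 2]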
| 406 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[Any] = ['''pixel_values''']
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1 / 2_5_5 , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> None:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : List[Any] = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
UpperCAmelCase_ : List[str] = get_size_dict(_UpperCamelCase )
UpperCAmelCase_ : int = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
UpperCAmelCase_ : Optional[int] = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='crop_size' )
UpperCAmelCase_ : str = do_resize
UpperCAmelCase_ : Dict = do_rescale
UpperCAmelCase_ : int = do_normalize
UpperCAmelCase_ : str = do_center_crop
UpperCAmelCase_ : Optional[Any] = crop_size
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : Any = resample
UpperCAmelCase_ : List[str] = rescale_factor
UpperCAmelCase_ : Tuple = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase_ : Union[str, Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
UpperCAmelCase_ : List[str] = get_size_dict(_UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase_ : Any = get_resize_output_image_size(_UpperCamelCase , size=size['shortest_edge'] , default_to_square=_UpperCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCAmelCase_ : str = (size['height'], size['width'])
else:
raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
UpperCAmelCase_ : Union[str, Any] = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(_UpperCamelCase , size=(size['height'], size['width']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> np.ndarray:
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ) -> BatchFeature:
UpperCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : List[str] = get_size_dict(_UpperCamelCase , param_name='crop_size' , default_to_square=_UpperCamelCase )
UpperCAmelCase_ : str = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Any = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : List[Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : int = get_size_dict(_UpperCamelCase )
if not is_batched(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = [images]
if not valid_images(_UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : List[str] = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
UpperCAmelCase_ : int = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
UpperCAmelCase_ : Any = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ : Any = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ : Dict = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
UpperCAmelCase_ : Dict = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
UpperCAmelCase_ : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
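# The preprocessing order implemented above (resize -> center crop -> rescale
# -> normalize), sketched with plain numpy on a random image; the resize step
# is omitted here for brevity. The mean/std values are the ImageNet defaults
# this class falls back to, and the sizes assume the 224x224 defaults from
# __init__. Illustrative only.
if __name__ == "__main__":
    fake = np.random.randint(0 , 256 , (256, 256, 3) , dtype=np.uint8 )
    top = (fake.shape[0] - 2_2_4) // 2
    left = (fake.shape[1] - 2_2_4) // 2
    cropped = fake[top : top + 2_2_4, left : left + 2_2_4] # center crop
    pixels = cropped.astype(np.float32 ) / 255.0 # rescale to [0, 1]
    mean = np.array([0.485, 0.456, 0.406] )
    std = np.array([0.229, 0.224, 0.225] )
    normalized = (pixels - mean) / std # per-channel normalize
    print(normalized.shape ) # (224, 224, 3)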
| 406 | 1 |
"""simple docstring"""
import os
import sys
import unittest
lowerCAmelCase__ =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowerCAmelCase__ =os.path.join(git_repo_path, "src", "diffusers")
class A__( unittest.TestCase ):
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = find_backend(''' if not is_torch_available():''' )
self.assertEqual(__SCREAMING_SNAKE_CASE , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__SCREAMING_SNAKE_CASE = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(__SCREAMING_SNAKE_CASE , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__SCREAMING_SNAKE_CASE = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(__SCREAMING_SNAKE_CASE , '''torch_and_transformers_and_onnx''' )
def _a ( self : int ) -> Tuple:
"""simple docstring"""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''' , objects )
        self.assertIn('''torch_and_transformers''' , objects )
        self.assertIn('''flax_and_transformers''' , objects )
        self.assertIn('''torch_and_transformers_and_onnx''' , objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(__SCREAMING_SNAKE_CASE , '''\nCONSTANT = None\n''' )
__SCREAMING_SNAKE_CASE = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
__SCREAMING_SNAKE_CASE , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
        actual_dummy_class = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
        self.assertEqual(actual_dummy_class , expected_dummy_class )
def _a ( self : Tuple ) -> List[str]:
"""simple docstring"""
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
        self.assertEqual(dummy_files['''torch'''] , expected_dummy_pytorch_file )
| 690 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726" ) -> dict:
    new_olid = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
    if new_olid.count('''/''' ) != 1:
        msg = f"""{olid} is not a valid Open Library olid"""
        raise ValueError(msg )
    return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json()


def summarize_book(ol_book_data: dict ) -> dict:
    desired_keys = {
        '''title''': '''Title''',
        '''publish_date''': '''Publish date''',
        '''authors''': '''Authors''',
        '''number_of_pages''': '''Number of pages:''',
        '''first_sentence''': '''First sentence''',
        '''isbn_10''': '''ISBN (10)''',
        '''isbn_13''': '''ISBN (13)''',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['''Authors'''] = [
        get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
    ]
    data['''First sentence'''] = data['''First sentence''']['''value''']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ''', '''.join(value )
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(F'''isbn/{isbn}'''))
print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
| 690 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class a__ ( A__ ):
A = 'data2vec-audio'
def __init__( self : Any,_A : Union[str, Any]=32,_A : Any=768,_A : int=12,_A : Optional[Any]=12,_A : str=3072,_A : Dict="gelu",_A : Tuple=0.1,_A : Union[str, Any]=0.1,_A : Union[str, Any]=0.1,_A : Optional[int]=0.0,_A : Tuple=0.1,_A : List[str]=0.1,_A : Optional[int]=0.02,_A : Dict=1E-5,_A : Tuple="gelu",_A : List[Any]=(512, 512, 512, 512, 512, 512, 512),_A : Tuple=(5, 2, 2, 2, 2, 2, 2),_A : List[Any]=(10, 3, 3, 3, 3, 2, 2),_A : Union[str, Any]=False,_A : str=16,_A : Union[str, Any]=19,_A : Optional[Any]=5,_A : List[str]=0.05,_A : Any=10,_A : Any=2,_A : str=0.0,_A : Optional[Any]=10,_A : str=0,_A : str="sum",_A : str=False,_A : Optional[Any]=False,_A : Optional[Any]=256,_A : Dict=(512, 512, 512, 512, 1500),_A : Dict=(5, 3, 3, 1, 1),_A : Optional[int]=(1, 2, 3, 1, 1),_A : List[str]=512,_A : str=0,_A : Optional[Any]=1,_A : Optional[int]=2,_A : Union[str, Any]=False,_A : Optional[Any]=3,_A : Union[str, Any]=2,_A : Any=3,_A : Union[str, Any]=None,**_A : Optional[int],):
"""simple docstring"""
super().__init__(**_A,pad_token_id=_A,bos_token_id=_A,eos_token_id=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : int = feat_extract_activation
SCREAMING_SNAKE_CASE_ : Optional[int] = list(_A )
SCREAMING_SNAKE_CASE_ : int = list(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = list(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = conv_bias
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE_ : Tuple = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE_ : Union[str, Any] = conv_pos_kernel_size
SCREAMING_SNAKE_CASE_ : Any = len(self.conv_dim )
SCREAMING_SNAKE_CASE_ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : int = hidden_act
SCREAMING_SNAKE_CASE_ : int = num_attention_heads
SCREAMING_SNAKE_CASE_ : Dict = hidden_dropout
SCREAMING_SNAKE_CASE_ : str = attention_dropout
SCREAMING_SNAKE_CASE_ : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE_ : List[str] = feat_proj_dropout
SCREAMING_SNAKE_CASE_ : List[str] = final_dropout
SCREAMING_SNAKE_CASE_ : str = layerdrop
SCREAMING_SNAKE_CASE_ : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Dict = initializer_range
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE_ : Tuple = mask_time_prob
SCREAMING_SNAKE_CASE_ : List[Any] = mask_time_length
SCREAMING_SNAKE_CASE_ : Tuple = mask_time_min_masks
SCREAMING_SNAKE_CASE_ : str = mask_feature_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = mask_feature_length
SCREAMING_SNAKE_CASE_ : List[str] = mask_feature_min_masks
# ctc loss
SCREAMING_SNAKE_CASE_ : List[Any] = ctc_loss_reduction
SCREAMING_SNAKE_CASE_ : Any = ctc_zero_infinity
# adapter
SCREAMING_SNAKE_CASE_ : List[Any] = add_adapter
SCREAMING_SNAKE_CASE_ : Tuple = adapter_kernel_size
SCREAMING_SNAKE_CASE_ : Tuple = adapter_stride
SCREAMING_SNAKE_CASE_ : int = num_adapter_layers
SCREAMING_SNAKE_CASE_ : Dict = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ : Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ : int = list(_A )
SCREAMING_SNAKE_CASE_ : Dict = list(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = list(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = xvector_output_dim
@property
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
return math.prod(self.conv_stride )
| 216 | class a__ :
def __init__( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = 0
SCREAMING_SNAKE_CASE_ : Tuple = 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
def __UpperCamelCase ( self : Optional[int],_A : str ):
"""simple docstring"""
if vertex not in self.adjacency:
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
self.num_vertices += 1
def __UpperCamelCase ( self : Any,_A : List[Any],_A : Optional[Any],_A : Optional[int] ):
"""simple docstring"""
self.add_vertex(_A )
self.add_vertex(_A )
if head == tail:
return
SCREAMING_SNAKE_CASE_ : List[Any] = weight
SCREAMING_SNAKE_CASE_ : List[str] = weight
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.get_edges()
for edge in edges:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = edge
edges.remove((tail, head, weight) )
for i in range(len(_A ) ):
SCREAMING_SNAKE_CASE_ : int = list(edges[i] )
edges.sort(key=lambda _A : e[2] )
for i in range(len(_A ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
SCREAMING_SNAKE_CASE_ : Dict = edges[i][2] + 1
for edge in edges:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = edge
SCREAMING_SNAKE_CASE_ : Union[str, Any] = weight
SCREAMING_SNAKE_CASE_ : List[str] = weight
def __str__( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.adjacency[head][tail]
string += F'{head} -> {tail} == {weight}\n'
return string.rstrip("\n" )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( _A : Union[str, Any]=None,_A : int=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = Graph()
if vertices is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = []
if edges is None:
SCREAMING_SNAKE_CASE_ : Tuple = []
for vertex in vertices:
g.add_vertex(_A )
for edge in edges:
g.add_edge(*_A )
return g
class a__ :
def __init__( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = {}
SCREAMING_SNAKE_CASE_ : int = {}
def __len__( self : Optional[Any] ):
"""simple docstring"""
return len(self.parent )
def __UpperCamelCase ( self : Union[str, Any],_A : Tuple ):
"""simple docstring"""
if item in self.parent:
return self.find(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = item
SCREAMING_SNAKE_CASE_ : Any = 0
return item
def __UpperCamelCase ( self : Tuple,_A : Dict ):
"""simple docstring"""
if item not in self.parent:
return self.make_set(_A )
if item != self.parent[item]:
SCREAMING_SNAKE_CASE_ : List[str] = self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Tuple,_A : Any,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.find(_A )
SCREAMING_SNAKE_CASE_ : int = self.find(_A )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
SCREAMING_SNAKE_CASE_ : Tuple = roota
return roota
if self.rank[roota] < self.rank[roota]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
SCREAMING_SNAKE_CASE_ : int = roota
return roota
return None
@staticmethod
def __UpperCamelCase ( _A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = graph.num_vertices
SCREAMING_SNAKE_CASE_ : Any = Graph.UnionFind()
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
while num_components > 1:
SCREAMING_SNAKE_CASE_ : List[str] = {}
for vertex in graph.get_vertices():
SCREAMING_SNAKE_CASE_ : List[Any] = -1
SCREAMING_SNAKE_CASE_ : str = graph.get_edges()
for edge in edges:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = edge
edges.remove((tail, head, weight) )
for edge in edges:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = edge
SCREAMING_SNAKE_CASE_ : List[str] = union_find.find(_A )
SCREAMING_SNAKE_CASE_ : Dict = union_find.find(_A )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
SCREAMING_SNAKE_CASE_ : int = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
SCREAMING_SNAKE_CASE_ : Any = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = cheap_edge[vertex]
if union_find.find(_A ) != union_find.find(_A ):
union_find.union(_A,_A )
mst_edges.append(cheap_edge[vertex] )
SCREAMING_SNAKE_CASE_ : Optional[int] = num_components - 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Graph.build(edges=_A )
return mst
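# The UnionFind above is the standard disjoint-set structure Boruvka's
# algorithm relies on; here is a compact reference version with path halving
# and union by rank, for comparison (illustrative, independent of the class above):
class DisjointSet:
    def __init__( self ):
        self.parent, self.rank = {}, {}

    def find( self , item ):
        if item not in self.parent:
            self.parent[item], self.rank[item] = item, 0
        while self.parent[item] != item:
            self.parent[item] = self.parent[self.parent[item]] # path halving
            item = self.parent[item]
        return item

    def union( self , a , b ):
        roota, rootb = self.find(a ), self.find(b )
        if roota == rootb:
            return
        if self.rank[roota] < self.rank[rootb]:
            roota, rootb = rootb, roota
        self.parent[rootb] = roota
        if self.rank[roota] == self.rank[rootb]:
            self.rank[roota] += 1


if __name__ == "__main__":
    ds = DisjointSet()
    ds.union(1 , 2 )
    ds.union(2 , 3 )
    assert ds.find(1 ) == ds.find(3 ) and ds.find(4 ) != ds.find(1 )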
| 216 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase_ (metaclass=DummyObject ):
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ["torch", "transformers", "onnx"] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )


class UpperCamelCase_ (metaclass=DummyObject ):
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ["torch", "transformers", "onnx"] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )


class UpperCamelCase_ (metaclass=DummyObject ):
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ["torch", "transformers", "onnx"] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )


class UpperCamelCase_ (metaclass=DummyObject ):
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ["torch", "transformers", "onnx"] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )


class UpperCamelCase_ (metaclass=DummyObject ):
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ["torch", "transformers", "onnx"] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )


class UpperCamelCase_ (metaclass=DummyObject ):
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ["torch", "transformers", "onnx"] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )
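# How these placeholders behave at runtime, in miniature: a DummyObject-style
# metaclass routes any missing attribute access through the backend check,
# which raises an ImportError naming the missing backends. Simplified
# illustration, not the actual utils implementation.
class _DummyMeta(type ):
    def __getattr__( cls , name ):
        raise ImportError(f"{cls.__name__}.{name} requires backends: {cls._backends}" )


class _FakeOnnxPipeline(metaclass=_DummyMeta ):
    _backends = ['''torch''', '''transformers''', '''onnx''']


if __name__ == "__main__":
    try:
        _FakeOnnxPipeline.some_missing_attribute
    except ImportError as err:
        print(err )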
| 463 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ['''CLIPFeatureExtractor''']
    _import_structure["image_processing_clip"] = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
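# The _LazyModule pattern above in miniature: a module-level __getattr__
# (PEP 562) that resolves names from _import_structure and imports the owning
# submodule only on first access. Kept as a commented sketch so it does not
# shadow the _LazyModule registration above; the real _LazyModule also
# handles dir(), caching and module specs.
#
# import importlib
#
# def __getattr__(name):
#     for module_name, exported in _import_structure.items():
#         if name in exported:
#             module = importlib.import_module(f".{module_name}", __name__)
#             return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")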
| 463 | 1 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __snake_case :
'''simple docstring'''
def __init__( self , A_ , A_=3 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=5_12 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_mask
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = scope
def lowercase_ ( self ):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self ):
'''simple docstring'''
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=A_ , )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = FalconModel(config=A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ )
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = FalconModel(A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
SCREAMING_SNAKE_CASE__ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , )
SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = FalconForCausalLM(config=A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = FalconForCausalLM(config=A_ )
model.to(A_ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE__ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , )
SCREAMING_SNAKE_CASE__ = outputs.past_key_values
# create hypothetical multiple next tokens and extend them onto next_input_ids
SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
SCREAMING_SNAKE_CASE__ = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['''hidden_states'''][0]
SCREAMING_SNAKE_CASE__ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['''hidden_states'''][0]
# select random slice
SCREAMING_SNAKE_CASE__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1E-3 ) )
def lowercase_ ( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class FalconModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
def lowercase_ ( self ):
'''simple docstring'''
self.model_tester = FalconModelTester(self )
self.config_tester = ConfigTester(self , config_class=FalconConfig , hidden_size=37 )
def lowercase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
config, *inputs = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
config.alibi = alibi
self.model_tester.create_and_check_model(config , *inputs )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(A_ )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = '''single_label_classification'''
SCREAMING_SNAKE_CASE__ = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(A_ )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ = FalconForCausalLM(A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(A_ , use_cache=A_ )
SCREAMING_SNAKE_CASE__ = input_ids.shape[0]
SCREAMING_SNAKE_CASE__ = model._convert_to_rw_cache(result.past_key_values )
SCREAMING_SNAKE_CASE__ = model._convert_cache_to_standard_format(A_ , A_ )
for layer in range(len(A_ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = '''multi_label_classification'''
SCREAMING_SNAKE_CASE__ = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(A_ )
SCREAMING_SNAKE_CASE__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase_ ( self ):
'''simple docstring'''
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(A_ , '''use_cache''' ):
return
SCREAMING_SNAKE_CASE__ = model_class(A_ ).to(A_ )
if "use_cache" not in inputs:
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model(**A_ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
SCREAMING_SNAKE_CASE__ = (
getattr(A_ , '''decoder_layers''' , A_ )
or getattr(A_ , '''num_decoder_layers''' , A_ )
or config.num_hidden_layers
)
SCREAMING_SNAKE_CASE__ = getattr(A_ , '''num_kv_heads''' , config.num_attention_heads )
SCREAMING_SNAKE_CASE__ = getattr(A_ , '''d_model''' , config.hidden_size )
SCREAMING_SNAKE_CASE__ = embed_dim // num_attention_heads
SCREAMING_SNAKE_CASE__ = outputs['''past_key_values''']
self.assertEqual(len(A_ ) , A_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = inputs['''input_ids'''].shape
for i in range(A_ ):
if config.new_decoder_architecture:
SCREAMING_SNAKE_CASE__ = config.num_attention_heads
elif config.multi_query:
SCREAMING_SNAKE_CASE__ = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class FalconLanguageGenerationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(A_ )
SCREAMING_SNAKE_CASE__ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ )
SCREAMING_SNAKE_CASE__ = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
SCREAMING_SNAKE_CASE__ = model.generate(**A_ , do_sample=A_ , max_new_tokens=19 )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(A_ )[0]
self.assertEqual(A_ , A_ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(A_ )
SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained(A_ )
model.eval()
model.to(A_ )
SCREAMING_SNAKE_CASE__ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**A_ , do_sample=A_ , max_new_tokens=4 )
model.generate(**A_ , do_sample=A_ , max_new_tokens=4 )
model.generate(**A_ , num_beams=2 , max_new_tokens=4 )
@slow
def lowercase_ ( self ):
'''simple docstring'''
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(A_ )
SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained(A_ )
model.eval()
model.to(device=A_ )
SCREAMING_SNAKE_CASE__ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ )
# Test results are the same with and without cache
SCREAMING_SNAKE_CASE__ = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 , use_cache=A_ )
SCREAMING_SNAKE_CASE__ = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 , use_cache=A_ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
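# Hypothetical local invocation for this test module (the file path is an
# assumption, not stated in the file); RUN_SLOW=1 enables the @slow tests:
#
#     RUN_SLOW=1 python -m pytest tests/models/falcon/test_modeling_falcon.py -q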
| 100 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict( filename ):
result = {}
with open(filename , '''r''' ) as file:
for line_number, line in enumerate(file ):
line = line.strip()
if line:
words = line.split()
key = line_number
value = words[0]
result[key] = value
return result
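# Usage sketch (the label file name is hypothetical): a file with one class name
# per line yields an id-to-label mapping such as {0: "down", 1: "up"}, which the
# is_seq_class branch below attaches to the model config.
#
#     idalabel = read_txt_into_dict("labels.txt")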
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
for attribute in key.split('''.''' ):
SCREAMING_SNAKE_CASE__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = PARAM_MAPPING[full_name.split('''.''' )[-1]]
SCREAMING_SNAKE_CASE__ = '''param'''
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE__ = hf_pointer
for attribute in hf_param_name.split('''.''' ):
SCREAMING_SNAKE_CASE__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = shape_pointer.shape
# let's reduce dimension
SCREAMING_SNAKE_CASE__ = value[0]
else:
SCREAMING_SNAKE_CASE__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
SCREAMING_SNAKE_CASE__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = value
else:
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def rename_dict( key , value , full_name , weight_type , hf_dict ):
SCREAMING_SNAKE_CASE__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = PARAM_MAPPING[full_name.split('''.''' )[-1]]
SCREAMING_SNAKE_CASE__ = '''param'''
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE__ = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE__ = '''.'''.join([key, hf_param_name] )
else:
SCREAMING_SNAKE_CASE__ = key
SCREAMING_SNAKE_CASE__ = value if '''lm_head''' in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wavaveca_layer( name , value , hf_model=None , hf_dict=None ):
SCREAMING_SNAKE_CASE__ = False
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE__ = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
SCREAMING_SNAKE_CASE__ = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ = name.split(lowerCAmelCase_ )[0].split('''.''' )[-2]
SCREAMING_SNAKE_CASE__ = mapped_key.replace('''*''' , lowerCAmelCase_ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ = '''weight_g'''
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ = '''weight_v'''
elif "bias" in name:
SCREAMING_SNAKE_CASE__ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE__ = '''weight'''
else:
SCREAMING_SNAKE_CASE__ = None
if hf_dict is not None:
rename_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return is_used
return is_used
def recursively_load_weights( fairseq_model , hf_model , is_headless ):
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == '''group''' , )
SCREAMING_SNAKE_CASE__ = True
else:
SCREAMING_SNAKE_CASE__ = load_wavaveca_layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if not is_used:
unused_weights.append(lowerCAmelCase_ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
SCREAMING_SNAKE_CASE__ = full_name.split('''conv_layers.''' )[-1]
SCREAMING_SNAKE_CASE__ = name.split('''.''' )
SCREAMING_SNAKE_CASE__ = int(items[0] )
SCREAMING_SNAKE_CASE__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase_ )
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
if config_path is not None:
SCREAMING_SNAKE_CASE__ = WavaVecaConfig.from_pretrained(lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = WavaVecaConfig()
if is_seq_class:
SCREAMING_SNAKE_CASE__ = read_txt_into_dict(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = idalabel
SCREAMING_SNAKE_CASE__ = WavaVecaForSequenceClassification(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
feature_extractor.save_pretrained(lowerCAmelCase_ )
elif is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE__ = Dictionary.load(lowerCAmelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE__ = target_dict.pad_index
SCREAMING_SNAKE_CASE__ = target_dict.bos_index
SCREAMING_SNAKE_CASE__ = target_dict.eos_index
SCREAMING_SNAKE_CASE__ = len(target_dict.symbols )
SCREAMING_SNAKE_CASE__ = os.path.join(lowerCAmelCase_ , '''vocab.json''' )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowerCAmelCase_ ) )
return
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = WavaVecaCTCTokenizer(
lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowerCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = True if config.feat_extract_norm == '''layer''' else False
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
processor.save_pretrained(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = WavaVecaForCTC(lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = WavaVecaForPreTraining(lowerCAmelCase_ )
if is_finetuned or is_seq_class:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
SCREAMING_SNAKE_CASE__ = argparse.Namespace(task='''audio_pretraining''' )
SCREAMING_SNAKE_CASE__ = fairseq.tasks.setup_task(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = model[0].eval()
recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
args = parser.parse_args()
is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
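# Example invocation (the script name and paths are placeholders; a sketch, not
# part of the original file):
#
#     python convert_wav2vec2_checkpoint.py \
#         --checkpoint_path /path/to/wav2vec_small_960h.pt \
#         --dict_path /path/to/dict.ltr.txt \
#         --pytorch_dump_folder_path ./wav2vec2-base-960h-converted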
| 100 | 1 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
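# A minimal sketch of the mocking pattern described above (the script module name
# and the mocked_dataloaders helper are assumptions, not defined in this file):
#
#     @mock.patch("nlp_example.get_dataloaders", mocked_dataloaders)
#     def test_nlp_example(self):
#         ...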
class ExampleDifferenceTests ( unittest.TestCase ):
def a_ ( self : List[Any] , A__ : str , A__ : bool , A__ : str = None , A__ : list = None ):
"""simple docstring"""
__lowerCamelCase : str = None
__lowerCamelCase : int = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
__lowerCamelCase : Union[str, Any] = os.path.abspath("""examples""" )
for item in os.listdir(A__ ):
if item not in EXCLUDE_EXAMPLES:
__lowerCamelCase : str = os.path.join(A__ , A__ )
if os.path.isfile(A__ ) and ".py" in item_path:
with self.subTest(
tested_script=A__ , feature_script=A__ , tested_section="""main()""" if parser_only else """training_function()""" , ):
__lowerCamelCase : Any = compare_against_test(
os.path.join(A__ , A__ ) , A__ , A__ , A__ )
__lowerCamelCase : Union[str, Any] = """\n""".join(A__ )
if special_strings is not None:
for string in special_strings:
__lowerCamelCase : Optional[Any] = diff.replace(A__ , """""" )
self.assertEqual(A__ , """""" )
def a_ ( self : Tuple ):
"""simple docstring"""
self.one_complete_example("""complete_nlp_example.py""" , A__ )
self.one_complete_example("""complete_nlp_example.py""" , A__ )
def a_ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase : str = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
__lowerCamelCase : Any = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , A__ , A__ , A__ )
self.one_complete_example("""complete_cv_example.py""" , A__ , A__ , A__ )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class FeatureExamplesTests ( TempDirTestCase ):
clear_on_exit = False
@classmethod
def a_ ( cls : str ):
"""simple docstring"""
super().setUpClass()
cls._tmpdir = tempfile.mkdtemp()
cls.configPath = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
cls._launch_args = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def a_ ( cls : Optional[Any] ):
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def a_ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase : List[str] = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def a_ ( self : str ):
"""simple docstring"""
__lowerCamelCase : List[str] = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
__lowerCamelCase : Optional[int] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def a_ ( self : Any ):
"""simple docstring"""
__lowerCamelCase : Tuple = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
__lowerCamelCase : str = run_command(self._launch_args + testargs , return_stdout=A__ )
self.assertNotIn("""epoch 0:""" , A__ )
self.assertIn("""epoch 1:""" , A__ )
def a_ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase : Dict = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
__lowerCamelCase : List[Any] = run_command(self._launch_args + testargs , return_stdout=A__ )
if torch.cuda.is_available():
__lowerCamelCase : Tuple = torch.cuda.device_count()
else:
__lowerCamelCase : int = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , A__ )
self.assertIn("""epoch 1:""" , A__ )
else:
self.assertIn("""epoch 0:""" , A__ )
self.assertIn("""epoch 1:""" , A__ )
@slow
def a_ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase : Tuple = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
__lowerCamelCase : Optional[Any] = run_command(self._launch_args + testargs , return_stdout=A__ )
__lowerCamelCase : Dict = re.findall("""({.+})""" , A__ )
__lowerCamelCase : int = [r for r in results if """accuracy""" in r][-1]
__lowerCamelCase : int = ast.literal_eval(A__ )
self.assertGreaterEqual(results["""accuracy"""] , 0.75 )
def a_ ( self : int ):
"""simple docstring"""
__lowerCamelCase : int = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def a_ ( self : Optional[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
__lowerCamelCase : List[Any] = f"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(A__ , """tracking""" ) ) )
def a_ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase : Union[str, Any] = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def a_ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase : Dict = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
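# Hypothetical standalone run of one of the examples exercised above (flags taken
# from the checkpointing test; the output path is a placeholder):
#
#     accelerate launch examples/by_feature/checkpointing.py \
#         --checkpointing_steps epoch --output_dir ./checkpoints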
| 700 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = XLMRobertaTokenizer
rust_tokenizer_class = XLMRobertaTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def a_ ( self : Dict ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def a_ ( self : int ):
"""simple docstring"""
__lowerCamelCase : List[Any] = """<pad>"""
__lowerCamelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A__ ) , A__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A__ ) , A__ )
def a_ ( self : str ):
"""simple docstring"""
__lowerCamelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A__ ) , 1002 )
def a_ ( self : List[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def a_ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase : Any = XLMRobertaTokenizer(A__ , keep_accents=A__ )
__lowerCamelCase : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(A__ )
self.assertListEqual(
A__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(A__ )
self.assertListEqual(
A__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def a_ ( self : Any ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__lowerCamelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowerCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
__lowerCamelCase : Dict = self.tokenizer_class.from_pretrained(A__ , **A__ )
__lowerCamelCase : str = tempfile.mkdtemp()
__lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(A__ )
__lowerCamelCase : Union[str, Any] = tokenizer_p.save_pretrained(A__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__lowerCamelCase : Any = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A__ , A__ )
# Checks everything loads correctly in the same way
__lowerCamelCase : str = tokenizer_r.from_pretrained(A__ )
__lowerCamelCase : str = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A__ )
# Save tokenizer rust, legacy_format=True
__lowerCamelCase : Optional[Any] = tempfile.mkdtemp()
__lowerCamelCase : Dict = tokenizer_r.save_pretrained(A__ , legacy_format=A__ )
__lowerCamelCase : List[Any] = tokenizer_p.save_pretrained(A__ )
# Checks it save with the same files
self.assertSequenceEqual(A__ , A__ )
# Checks everything loads correctly in the same way
__lowerCamelCase : Any = tokenizer_r.from_pretrained(A__ )
__lowerCamelCase : List[str] = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
shutil.rmtree(A__ )
# Save tokenizer rust, legacy_format=False
__lowerCamelCase : int = tempfile.mkdtemp()
__lowerCamelCase : Any = tokenizer_r.save_pretrained(A__ , legacy_format=A__ )
__lowerCamelCase : List[str] = tokenizer_p.save_pretrained(A__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowerCamelCase : List[Any] = tokenizer_r.from_pretrained(A__ )
__lowerCamelCase : Optional[int] = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ , A__ ) )
shutil.rmtree(A__ )
@cached_property
def a_ ( self : str ):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def a_ ( self : int ):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(A__ , f.name )
__lowerCamelCase : int = XLMRobertaTokenizer(f.name , keep_accents=A__ )
__lowerCamelCase : str = pickle.dumps(A__ )
pickle.loads(A__ )
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowerCamelCase : Union[str, Any] = self.get_tokenizer()
__lowerCamelCase : Tuple = self.get_rust_tokenizer()
__lowerCamelCase : Optional[Any] = """I was born in 92000, and this is falsé."""
__lowerCamelCase : int = tokenizer.tokenize(A__ )
__lowerCamelCase : Optional[Any] = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
__lowerCamelCase : Any = tokenizer.encode(A__ , add_special_tokens=A__ )
__lowerCamelCase : Dict = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
__lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
__lowerCamelCase : Optional[int] = tokenizer.encode(A__ )
__lowerCamelCase : str = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
@slow
def a_ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase : List[Any] = """Hello World!"""
__lowerCamelCase : str = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(A__ , self.big_tokenizer.encode(A__ ) )
@slow
def a_ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase : Any = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
__lowerCamelCase : Optional[Any] = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(A__ , self.big_tokenizer.encode(A__ ) )
@slow
def a_ ( self : Any ):
"""simple docstring"""
__lowerCamelCase : Tuple = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A__ , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
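# A minimal sketch of the round trip exercised above (downloads the pretrained
# vocabulary; the expected ids are taken from the "Hello World!" test earlier):
#
#     tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#     assert tok.encode("Hello World!") == [0, 35378, 6661, 38, 2]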
| 483 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def tpu_command_parser( subparsers=None ):
"""simple docstring"""
if subparsers is not None:
a__ : Optional[int] =subparsers.add_parser("tpu-config" , description=_description )
else:
a__ : Optional[int] =argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
# Core arguments
a__ : int =parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=SCREAMING_SNAKE_CASE , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=SCREAMING_SNAKE_CASE , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
a__ : Tuple =parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=SCREAMING_SNAKE_CASE , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
parser.set_defaults(func=tpu_command_launcher )
return parser
def tpu_command_launcher( args ):
"""simple docstring"""
a__ : Union[str, Any] =None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ):
a__ : Dict =load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
a__ : Optional[int] =defaults.command_file
if not args.command and defaults.commands is not None:
a__ : Optional[Any] =defaults.commands
if not args.tpu_name:
a__ : Union[str, Any] =defaults.tpu_name
if not args.tpu_zone:
a__ : str =defaults.tpu_zone
if args.accelerate_version == "dev":
a__ : List[str] ="git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
a__ : Optional[int] ="accelerate -U"
elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ):
a__ : Union[str, Any] =f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod." )
if args.command_file:
with open(args.command_file , "r" ) as f:
a__ : List[Any] =[f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ):
a__ : int =[line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
a__ : str =["cd /usr/share"]
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
a__ : Optional[int] ="; ".join(SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
a__ : Any =["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(SCREAMING_SNAKE_CASE )}''' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print("Successfully setup pod." )
def main( ):
"""simple docstring"""
parser = tpu_command_parser()
args = parser.parse_args()
tpu_command_launcher(args )
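# Example invocation (TPU name and zone are placeholders; a sketch, not part of
# the original file):
#
#     accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#         --command "echo hello from the pod" --install_accelerate --debug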
| 563 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config( checkpoint_url ):
"""simple docstring"""
a__ : Optional[Any] =SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
a__ : Optional[int] =4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
a__ : int =4
a__ : Optional[int] =48
a__ : str ="pixelshuffle_aux"
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
a__ : str =[6, 6, 6, 6]
a__ : Optional[int] =60
a__ : Any =[6, 6, 6, 6]
a__ : int ="pixelshuffledirect"
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
a__ : List[str] =4
a__ : Union[str, Any] ="nearest+conv"
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
a__ : str =1
a__ : Optional[Any] =1
a__ : str =126
a__ : Optional[Any] =7
a__ : Optional[int] =2_5_5.0
a__ : str =""
return config
def rename_key( name , config ):
"""simple docstring"""
if "patch_embed.proj" in name and "layers" not in name:
a__ : Any =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a__ : str =name.replace("patch_embed.norm" , "embeddings.patch_embeddings.layernorm" )
if "layers" in name:
a__ : Union[str, Any] =name.replace("layers" , "encoder.stages" )
if "residual_group.blocks" in name:
a__ : List[Any] =name.replace("residual_group.blocks" , "layers" )
if "attn.proj" in name:
a__ : Union[str, Any] =name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
a__ : int =name.replace("attn" , "attention.self" )
if "norm1" in name:
a__ : List[Any] =name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
a__ : Optional[int] =name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
a__ : Dict =name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
a__ : Optional[int] =name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
a__ : List[Any] =name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
a__ : Optional[int] =name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
a__ : Optional[Any] =name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
a__ : List[str] =name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if "patch_embed.proj" in name:
a__ : List[Any] =name.replace("patch_embed.proj" , "patch_embed.projection" )
if name == "norm.weight":
a__ : Dict ="layernorm.weight"
if name == "norm.bias":
a__ : Any ="layernorm.bias"
if "conv_first" in name:
a__ : Tuple =name.replace("conv_first" , "first_convolution" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
a__ : List[str] =name.replace("conv_last" , "final_convolution" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
a__ : str =name.replace("conv_before_upsample.0" , "conv_before_upsample" )
if "upsample.0" in name:
a__ : Any =name.replace("upsample.0" , "upsample.convolution_0" )
if "upsample.2" in name:
a__ : Optional[int] =name.replace("upsample.2" , "upsample.convolution_1" )
a__ : Any ="upsample." + name
elif config.upsampler == "pixelshuffledirect":
a__ : str =name.replace("upsample.0.weight" , "upsample.conv.weight" )
a__ : Any =name.replace("upsample.0.bias" , "upsample.conv.bias" )
else:
pass
else:
a__ : Dict ="swin2sr." + name
return name
def convert_state_dict( orig_state_dict , config ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a__ : Dict =orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "qkv" in key:
a__ : str =key.split("." )
a__ : Optional[int] =int(key_split[1] )
a__ : Dict =int(key_split[4] )
a__ : List[Any] =config.embed_dim
if "weight" in key:
a__ : List[Any] =val[:dim, :]
a__ : List[str] =val[dim : dim * 2, :]
a__ : Dict =val[-dim:, :]
else:
a__ : int =val[:dim]
a__ : Union[str, Any] =val[dim : dim * 2]
a__ : Tuple =val[-dim:]
pass
else:
a__ : Union[str, Any] =val
return orig_state_dict
def convert_swinasr_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
"""simple docstring"""
a__ : Optional[Any] =get_config(SCREAMING_SNAKE_CASE )
a__ : Union[str, Any] =SwinaSRForImageSuperResolution(SCREAMING_SNAKE_CASE )
model.eval()
a__ : Union[str, Any] =torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location="cpu" )
a__ : Dict =convert_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
a__ , a__ : List[Any] =model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError("Missing keys when converting: {}".format(SCREAMING_SNAKE_CASE ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'''Unexpected key {key} in state_dict''' )
# verify values
a__ : str ="https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
a__ : List[Any] =Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ).convert("RGB" )
a__ : Dict =SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
a__ : List[str] =126 if "Jpeg" in checkpoint_url else 256
a__ : Optional[Any] =Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
a__ : Dict =transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
if config.num_channels == 1:
a__ : Tuple =pixel_values[:, 0, :, :].unsqueeze(1 )
a__ : Union[str, Any] =model(SCREAMING_SNAKE_CASE )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
a__ : str =torch.Size([1, 3, 512, 512] )
a__ : List[str] =torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
a__ : List[Any] =torch.Size([1, 3, 1_024, 1_024] )
a__ : List[str] =torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
a__ : Tuple =torch.Size([1, 3, 1_024, 1_024] )
a__ : Optional[int] =torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
a__ : Tuple =torch.Size([1, 3, 512, 512] )
a__ : str =torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
a__ : Optional[int] =torch.Size([1, 3, 1_024, 1_024] )
a__ : Optional[Any] =torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-3 )
print("Looks ok!" )
a__ : int ={
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
"swin2SR-classical-sr-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
"swin2SR-classical-sr-x4-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
"swin2SR-compressed-sr-x4-48"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
"swin2SR-lightweight-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
"swin2SR-realworld-sr-x4-64-bsrgan-psnr"
),
}
a__ : Any =url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 563 | 1 |
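The conversion row above splits each fused attention `qkv` projection into separate query/key/value tensors by slicing along the first axis. A minimal standalone sketch of that slicing; the key names here are hypothetical, not the converter's real state-dict keys:

import torch

def split_qkv(qkv_weight: torch.Tensor, dim: int) -> dict:
    # the fused projection has shape (3 * dim, dim): rows 0..dim are the query,
    # dim..2*dim the key, and the last dim rows the value
    return {
        "query.weight": qkv_weight[:dim, :],
        "key.weight": qkv_weight[dim : dim * 2, :],
        "value.weight": qkv_weight[-dim:, :],
    }

fused = torch.randn(3 * 8, 8)
parts = split_qkv(fused, dim=8)
# concatenating the three slices in order reconstructs the fused tensor
assert torch.equal(
    torch.cat([parts["query.weight"], parts["key.weight"], parts["value.weight"]]), fused
)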
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a : List[str] = 1_6
a : List[Any] = 3_2
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = 1_6 ) -> Any:
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("""bert-base-cased""" )
UpperCAmelCase : Optional[Any] = DatasetDict(
{
"""train""": dataset["""train"""].select(_lowercase ),
"""validation""": dataset["""train"""].select(_lowercase ),
"""test""": dataset["""validation"""],
} )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase : str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase : Any = datasets.map(
_lowercase , batched=_lowercase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase : int = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase : Any = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase : List[str] = 1_6
elif accelerator.mixed_precision != "no":
UpperCAmelCase : Any = 8
else:
UpperCAmelCase : Tuple = None
return tokenizer.pad(
_lowercase , padding="""longest""" , max_length=_lowercase , pad_to_multiple_of=_lowercase , return_tensors="""pt""" , )
# Instantiate dataloaders.
UpperCAmelCase : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
UpperCAmelCase : Tuple = DataLoader(
tokenized_datasets["""validation"""] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
UpperCAmelCase : Tuple = DataLoader(
tokenized_datasets["""test"""] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader, test_dataloader
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[Any]:
# New Code #
UpperCAmelCase : List[Any] = []
# Download the dataset
UpperCAmelCase : List[Any] = load_dataset("""glue""" , """mrpc""" )
# Create our splits
UpperCAmelCase : Dict = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
UpperCAmelCase : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase : List[str] = config["""lr"""]
UpperCAmelCase : str = int(config["""num_epochs"""] )
UpperCAmelCase : str = int(config["""seed"""] )
UpperCAmelCase : Tuple = int(config["""batch_size"""] )
UpperCAmelCase : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase : str = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase : Optional[int] = MAX_GPU_BATCH_SIZE
set_seed(_lowercase )
# New Code #
# Create our folds:
UpperCAmelCase : Optional[int] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
UpperCAmelCase : Tuple = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(_lowercase ):
UpperCAmelCase : Tuple = get_fold_dataloaders(
_lowercase , _lowercase , _lowercase , _lowercase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase : Any = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase : Union[str, Any] = AdamW(params=model.parameters() , lr=_lowercase )
# Instantiate scheduler
UpperCAmelCase : Any = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=1_0_0 , num_training_steps=(len(_lowercase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase : Any = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Now we train the model
for epoch in range(_lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase : Optional[Any] = model(**_lowercase )
UpperCAmelCase : Any = outputs.loss
UpperCAmelCase : Any = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**_lowercase )
UpperCAmelCase : Any = outputs.logits.argmax(dim=-1 )
UpperCAmelCase : Optional[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
UpperCAmelCase : Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _lowercase )
# New Code #
# We also run predictions on the test set at the very end
UpperCAmelCase : Optional[Any] = []
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**_lowercase )
UpperCAmelCase : List[str] = outputs.logits
UpperCAmelCase : Dict = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(_lowercase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
UpperCAmelCase : Dict = torch.cat(_lowercase , dim=0 )
UpperCAmelCase : int = torch.stack(_lowercase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
UpperCAmelCase : List[Any] = metric.compute(predictions=_lowercase , references=_lowercase )
accelerator.print("""Average test metrics from all folds:""" , _lowercase )
def __lowerCamelCase ( ) -> Optional[Any]:
UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=_lowercase , default=_lowercase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" , type=_lowercase , default=3 , help="""The number of splits to perform across the dataset""" )
UpperCAmelCase : Optional[int] = parser.parse_args()
UpperCAmelCase : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
| 711 |
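The cross-validation row above splits with StratifiedKFold and then averages the stacked per-fold test logits before taking the argmax. A small sketch of the same ensembling idea using only scikit-learn and NumPy; the model and data are stand-ins, not the script's Bert/MRPC setup:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold

X, y = make_classification(n_samples=200, random_state=0)
X_train, y_train, X_test, y_test = X[:150], y[:150], X[150:], y[150:]

fold_probs = []
for train_idx, _ in StratifiedKFold(n_splits=3).split(X_train, y_train):
    clf = LogisticRegression(max_iter=1000).fit(X_train[train_idx], y_train[train_idx])
    fold_probs.append(clf.predict_proba(X_test))

# average the per-fold scores and take the argmax, mirroring
# torch.stack(...).sum(0).div(num_folds).argmax(-1) in the script above
preds = np.stack(fold_probs).mean(axis=0).argmax(axis=-1)
print("ensembled accuracy:", (preds == y_test).mean())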
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=32 , A=3 , A=4 , A=[10, 20, 30, 40] , A=[2, 2, 3, 2] , A=True , A=True , A=37 , A="gelu" , A=10 , A=0.0_2 , A=["stage2", "stage3", "stage4"] , A=[2, 3, 4] , A=None , ) -> int:
UpperCAmelCase : str = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Dict = image_size
UpperCAmelCase : Tuple = num_channels
UpperCAmelCase : Union[str, Any] = num_stages
UpperCAmelCase : Any = hidden_sizes
UpperCAmelCase : str = depths
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = out_features
UpperCAmelCase : List[str] = out_indices
UpperCAmelCase : Any = scope
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def _lowercase( self ) -> Optional[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowercase( self , A , A , A ) -> Optional[Any]:
UpperCAmelCase : int = ConvNextVaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : List[str] = ConvNextVaForImageClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : Optional[Any] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Any = model(A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[int] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Dict = ConvNextVaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def _lowercase( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase( self ) -> List[str]:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def _lowercase( self ) -> Dict:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def _lowercase( self ) -> Any:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def _lowercase( self ) -> int:
pass
def _lowercase( self ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : Optional[int] = True
if model_class.__name__ in [
*get_values(A ),
*get_values(A ),
]:
continue
UpperCAmelCase : Any = model_class(A )
model.to(A )
model.train()
UpperCAmelCase : List[str] = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : List[str] = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : List[str] = False
UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(A ), *get_values(A )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase : Dict = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase : Any = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : Any = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Tuple = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> List[str]:
def check_hidden_states_output(A , A , A ):
UpperCAmelCase : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(A , A ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : int = True
check_hidden_states_output(A , A , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def _lowercase( self ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def __lowerCamelCase ( ) -> Optional[int]:
UpperCAmelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def _lowercase( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Any = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(A )
UpperCAmelCase : List[Any] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Tuple = preprocessor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**A )
# verify the logits
UpperCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A )
UpperCAmelCase : Dict = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
| 672 | 0 |
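The integration test above pins a small slice of the expected logits and compares against it with torch.allclose under an absolute tolerance. The same pattern in isolation; the tensor values below are made up for illustration:

import torch

logits = torch.tensor([0.9996, 0.1966, -0.4386, 1.2300])  # pretend model output
expected_slice = torch.tensor([0.9996, 0.1966, -0.4386])  # pinned reference values

# atol=1e-4 tolerates small numerical drift across hardware and backends
assert logits.shape == torch.Size([4])
assert torch.allclose(logits[:3], expected_slice, atol=1e-4)
print("logits match the pinned reference")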
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_lowercase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' ,safety_checker=__A ,cache_dir=__A )
_lowercase = [t[-1] for t in os.walk(os.path.join(__A ,os.listdir(__A )[0] ,'snapshots' ) )]
_lowercase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> str:
_lowercase , _lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' ,safety_checker=__A )
_lowercase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase = jax.random.PRNGKey(0 )
_lowercase = 4
_lowercase = jax.device_count()
_lowercase = num_samples * [prompt]
_lowercase = pipeline.prepare_inputs(__A )
# shard inputs and rng
_lowercase = replicate(__A )
_lowercase = jax.random.split(__A ,__A )
_lowercase = shard(__A )
_lowercase = pipeline(__A ,__A ,__A ,__A ,jit=__A ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3
assert np.abs(np.abs(__A ,dtype=np.floataa ).sum() - 49947.875 ) < 5e-1
_lowercase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__A ) == num_samples
def __UpperCAmelCase ( self : str ) -> List[Any]:
_lowercase , _lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='flax' ,safety_checker=__A )
_lowercase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase = jax.random.PRNGKey(0 )
_lowercase = 50
_lowercase = jax.device_count()
_lowercase = num_samples * [prompt]
_lowercase = pipeline.prepare_inputs(__A )
# shard inputs and rng
_lowercase = replicate(__A )
_lowercase = jax.random.split(__A ,__A )
_lowercase = shard(__A )
_lowercase = pipeline(__A ,__A ,__A ,__A ,jit=__A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3
assert np.abs((np.abs(__A ,dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
_lowercase , _lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa ,safety_checker=__A )
_lowercase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase = jax.random.PRNGKey(0 )
_lowercase = 50
_lowercase = jax.device_count()
_lowercase = num_samples * [prompt]
_lowercase = pipeline.prepare_inputs(__A )
# shard inputs and rng
_lowercase = replicate(__A )
_lowercase = jax.random.split(__A ,__A )
_lowercase = shard(__A )
_lowercase = pipeline(__A ,__A ,__A ,__A ,jit=__A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(__A ,dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def __UpperCAmelCase ( self : List[Any] ) -> str:
_lowercase , _lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa )
_lowercase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase = jax.random.PRNGKey(0 )
_lowercase = 50
_lowercase = jax.device_count()
_lowercase = num_samples * [prompt]
_lowercase = pipeline.prepare_inputs(__A )
# shard inputs and rng
_lowercase = replicate(__A )
_lowercase = jax.random.split(__A ,__A )
_lowercase = shard(__A )
_lowercase = pipeline(__A ,__A ,__A ,__A ,jit=__A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(__A ,dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
_lowercase = FlaxDDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule='scaled_linear' ,set_alpha_to_one=__A ,steps_offset=1 ,)
_lowercase , _lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa ,scheduler=__A ,safety_checker=__A ,)
_lowercase = scheduler.create_state()
_lowercase = scheduler_state
_lowercase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase = jax.random.PRNGKey(0 )
_lowercase = 50
_lowercase = jax.device_count()
_lowercase = num_samples * [prompt]
_lowercase = pipeline.prepare_inputs(__A )
# shard inputs and rng
_lowercase = replicate(__A )
_lowercase = jax.random.split(__A ,__A )
_lowercase = shard(__A )
_lowercase = pipeline(__A ,__A ,__A ,__A ,jit=__A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3
assert np.abs((np.abs(__A ,dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1
def __UpperCAmelCase ( self : List[str] ) -> str:
_lowercase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase = jax.device_count()
_lowercase = num_samples * [prompt]
_lowercase = jax.random.split(jax.random.PRNGKey(0 ) ,__A )
_lowercase , _lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa ,safety_checker=__A ,)
_lowercase = replicate(__A )
_lowercase = pipeline.prepare_inputs(__A )
_lowercase = shard(__A )
_lowercase = pipeline(__A ,__A ,__A ,jit=__A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
slice = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
_lowercase , _lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa ,safety_checker=__A ,use_memory_efficient_attention=__A ,)
_lowercase = replicate(__A )
_lowercase = pipeline.prepare_inputs(__A )
_lowercase = shard(__A )
_lowercase = pipeline(__A ,__A ,__A ,jit=__A ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
slice_eff = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2 | 67 |
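The Flax pipeline tests above lean on the replicate/shard pattern: parameters are copied to every device while the batch's leading axis is split across them, then the function runs under pmap. A minimal sketch of that data-parallel setup with a toy function, not the diffusers pipeline itself:

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

def scale(params, batch):  # hypothetical per-device computation
    return batch * params["factor"]

params = {"factor": jnp.float32(2.0)}
batch = jnp.ones((jax.device_count() * 4, 3))

p_params = replicate(params)  # copy the parameter pytree onto every device
sharded = shard(batch)        # reshape axis 0 into (n_devices, per_device, ...)
out = jax.pmap(scale)(p_params, sharded)
print(out.shape)              # (n_devices, 4, 3)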
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
snake_case = Lock()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :Union[str, Any] , snake_case__ :Tuple , snake_case__ :Any , snake_case__ :Dict , snake_case__ :Optional[int] , snake_case__ :List[str] ) -> Optional[Any]:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_lowercase = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_lowercase = min(snake_case__ , snake_case__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_lowercase = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_lowercase = max(snake_case__ , snake_case__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Dict:
_lowercase = []
_lowercase = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_lowercase = Pipe()
_lowercase = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
_lowercase = temp_rs
_lowercase = temp_rr
for i in range(1 , len(snake_case__ ) - 1 ):
_lowercase = Pipe()
_lowercase = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
_lowercase = temp_rs
_lowercase = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__ ) - 1,
arr[len(snake_case__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__ ) ):
_lowercase = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
_lowercase = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*snake_case__ )
_lowercase = odd_even_transposition(snake_case__ )
print('Sorted List\n' )
print(*snake_case__ )
if __name__ == "__main__":
main() | 67 | 1 |
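For comparison with the pipe-and-process version above, here is a plain single-process odd-even transposition sort; each phase alternates between even- and odd-indexed neighbor pairs, and n phases guarantee a sorted list:

def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

print(odd_even_transposition_sequential(list(range(10, 0, -1))))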
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if n_term == "":
return []
lowerCAmelCase : list = []
for temp in range(int(SCREAMING_SNAKE_CASE ) ):
series.append(f"""1/{temp + 1}""" if series else "1" )
return series
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 681 |
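The row above only formats the terms 1, 1/2, 1/3, ... as strings. As a quick numeric companion, the partial sum H_n grows like ln(n) plus the Euler–Mascheroni constant; the constant below is a truncated approximation:

import math

def harmonic_sum(n: int) -> float:
    return sum(1 / k for k in range(1, n + 1))

n = 1_000
exact = harmonic_sum(n)
approx = math.log(n) + 0.5772156649  # H_n ~ ln(n) + gamma
print(f"H_{n} = {exact:.6f}, ln(n) + gamma = {approx:.6f}")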
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Tuple = OmegaConf.load(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
lowerCAmelCase : int = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Dict = "first_stage_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Tuple = "model.diffusion_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : str = state_dict[key]
lowerCAmelCase : List[str] = config.model.params.first_stage_config.params
lowerCAmelCase : List[Any] = config.model.params.unet_config.params
lowerCAmelCase : Union[str, Any] = VQModel(**SCREAMING_SNAKE_CASE ).eval()
vqvae.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = UNetLDMModel(**SCREAMING_SNAKE_CASE ).eval()
unet.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=SCREAMING_SNAKE_CASE , )
lowerCAmelCase : Tuple = LDMPipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
pipeline.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowerCAmelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 681 | 1 |
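The LDM conversion above carves one checkpoint into VQVAE and UNet sub-dictionaries by key prefix, dropping the prefix as it goes. The same idea as a small reusable helper; the keys below are toy examples:

def extract_by_prefix(state_dict: dict, prefix: str) -> dict:
    # keep only entries under `prefix` and strip the prefix from each key,
    # mirroring the first_stage_model. / model.diffusion_model. split above
    return {k[len(prefix):]: v for k, v in state_dict.items() if k.startswith(prefix)}

full = {"first_stage_model.encoder.w": 1, "model.diffusion_model.block.w": 2, "other": 3}
print(extract_by_prefix(full, "first_stage_model."))      # {'encoder.w': 1}
print(extract_by_prefix(full, "model.diffusion_model."))  # {'block.w': 2}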
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : Optional[Any] = logging.getLogger()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ):
lowerCAmelCase = {}
lowerCAmelCase = os.path.join(_UpperCAmelCase , 'all_results.json' )
if os.path.exists(_UpperCAmelCase ):
with open(_UpperCAmelCase , 'r' ) as f:
lowerCAmelCase = json.load(_UpperCAmelCase )
else:
raise ValueError(F'can\'t find {path}' )
return results
__UpperCamelCase : Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
import xla_spawn
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(_snake_case , 'argv' , _snake_case ):
lowerCAmelCase = time()
xla_spawn.main()
lowerCAmelCase = time()
lowerCAmelCase = get_results(_snake_case )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_00 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import xla_spawn
lowerCAmelCase = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(_snake_case , 'argv' , _snake_case ):
xla_spawn.main()
| 4 |
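The TPU test harness above asserts on metrics read back from all_results.json after the run finishes. A tiny self-contained sketch of that write-then-verify pattern; the metric values are invented:

import json
import os
import tempfile

with tempfile.TemporaryDirectory() as output_dir:
    results_path = os.path.join(output_dir, "all_results.json")
    with open(results_path, "w") as f:
        json.dump({"eval_accuracy": 0.82, "train_runtime": 412.0}, f)

    with open(results_path) as f:
        results = json.load(f)
    # same style of threshold check the harness performs
    assert results["eval_accuracy"] >= 0.75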
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =(PNDMScheduler,)
_UpperCAmelCase =(('''num_inference_steps''', 50),)
def _lowerCAmelCase ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a)
return config
def _lowerCAmelCase ( self: Any , a: Tuple=0 , **a: Any) ->Any:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Union[str, Any] , a: str=0 , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: Dict , **a: int) ->Any:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.prk_timesteps):
a_ = model(a , a)
a_ = scheduler.step_prk(a , a , a).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ = model(a , a)
a_ = scheduler.step_plms(a , a , a).prev_sample
return sample
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(a , "set_timesteps"):
scheduler.set_timesteps(a)
elif num_inference_steps is not None and not hasattr(a , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , 0 , a , **a).prev_sample
a_ = scheduler.step_prk(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step_plms(a , 0 , a , **a).prev_sample
a_ = scheduler.step_plms(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=a)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ = scheduler.step_prk(a , a , a).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 0 |
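The scheduler tests above repeatedly save a config to a temporary directory, reload it, and compare step outputs. The save/reload round trip on its own, assuming the diffusers PNDMScheduler API exercised in the test:

import tempfile
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(
    num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)                   # writes scheduler_config.json
    reloaded = PNDMScheduler.from_pretrained(tmpdir)

assert reloaded.config.beta_schedule == "linear"
reloaded.set_timesteps(10)
print(reloaded.timesteps)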
"""simple docstring"""
import argparse
from collections import defaultdict
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case, snake_case):
__snake_case = f"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(_lowercase, '''r''') as f:
__snake_case = f.readlines()
__snake_case = f"class {class_name}("
__snake_case = f"{4 * ' '}def {test_name}("
__snake_case = f"{8 * ' '}{correct_line.split()[0]}"
__snake_case = f"{16 * ' '}{correct_line.split()[0]}"
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = 0
__snake_case = 0
__snake_case = []
for line in lines:
if line.startswith(_lowercase):
__snake_case = True
elif in_class and line.startswith(_lowercase):
__snake_case = True
elif in_class and in_func and (line.startswith(_lowercase) or line.startswith(_lowercase)):
__snake_case = len(line.split(correct_line.split()[0])[0])
count += 1
if count == done_test[_id]:
__snake_case = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
__snake_case = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"{spaces * ' '}{correct_line}")
__snake_case = False
else:
new_lines.append(_lowercase)
with open(_lowercase, '''w''') as f:
for line in new_lines:
f.write(_lowercase)
def SCREAMING_SNAKE_CASE ( snake_case, snake_case=None):
if fail is not None:
with open(_lowercase, '''r''') as f:
__snake_case = {l.strip() for l in f.readlines()}
else:
__snake_case = None
with open(_lowercase, '''r''') as f:
__snake_case = f.readlines()
__snake_case = defaultdict(_lowercase)
for line in correct_lines:
__snake_case = line.split(''';''')
if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase)
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
__lowercase : Tuple = parser.parse_args()
main(args.correct_filename, args.fail_filename) | 709 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowercase : Dict = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
__lowercase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure) | 93 | 0 |
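The _LazyModule pattern above defers heavy imports until an attribute is first accessed. A much smaller stand-in using PEP 562's module-level __getattr__; the submodule map is hypothetical:

# lazy_pkg/__init__.py
import importlib
from typing import Any

_SUBMODULES = {"heavy": ".heavy"}  # hypothetical attribute -> submodule map

def __getattr__(name: str) -> Any:
    # only imported the first time `lazy_pkg.heavy` is actually touched
    if name in _SUBMODULES:
        return importlib.import_module(_SUBMODULES[name], __name__)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")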
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : str = "dpr"
def __init__( self , _lowerCAmelCase=30522 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0 , _lowerCAmelCase="absolute" , _lowerCAmelCase = 0 , **_lowerCAmelCase , ) -> Any:
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = projection_dim
_lowerCAmelCase = position_embedding_type
| 18 |
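The DPR config above is a standard PretrainedConfig subclass, so it serializes to and from plain dicts and JSON. A quick round trip, assuming the public transformers DPRConfig:

from transformers import DPRConfig

config = DPRConfig(projection_dim=128, hidden_size=768)
print(config.projection_dim)           # 128
json_str = config.to_json_string()     # serializable like any PretrainedConfig
roundtrip = DPRConfig.from_dict(config.to_dict())
assert roundtrip.projection_dim == 128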
'''simple docstring'''
from __future__ import annotations
def __a(SCREAMING_SNAKE_CASE_ : list ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(SCREAMING_SNAKE_CASE_ ) / len(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_SCREAMING_SNAKE_CASE = '''scheduler_config.json'''
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Any = 1
a : Dict = 2
a : Optional[Any] = 3
a : List[str] = 4
a : Any = 5
@dataclass
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : jnp.ndarray
class __lowercase :
'''simple docstring'''
a : str = SCHEDULER_CONFIG_NAME
a : Union[str, Any] = ["dtype"]
a : str = []
a : List[Any] = True
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=False ,**_lowerCamelCase ,) -> Tuple:
'''simple docstring'''
__lowercase , __lowercase = cls.load_config(
pretrained_model_name_or_path=_lowerCamelCase ,subfolder=_lowerCamelCase ,return_unused_kwargs=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase , __lowercase = cls.from_config(_lowerCamelCase ,return_unused_kwargs=_lowerCamelCase ,**_lowerCamelCase )
if hasattr(_lowerCamelCase ,'''create_state''' ) and getattr(_lowerCamelCase ,'''has_state''' ,_lowerCamelCase ):
__lowercase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = False ,**_lowerCamelCase ) -> str:
'''simple docstring'''
self.save_config(save_directory=_lowerCamelCase ,push_to_hub=_lowerCamelCase ,**_lowerCamelCase )
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def _UpperCAmelCase (cls ) -> int:
'''simple docstring'''
__lowercase = list(set([cls.__name__] + cls._compatibles ) )
__lowercase = importlib.import_module(__name__.split('''.''' )[0] )
__lowercase = [
getattr(_lowerCamelCase ,_lowerCamelCase ) for c in compatible_classes_str if hasattr(_lowerCamelCase ,_lowerCamelCase )
]
return compatible_classes
def _lowerCAmelCase ( lowerCamelCase_ : jnp.ndarray , lowerCamelCase_ : Tuple[int] ):
assert len(lowerCamelCase_ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowerCamelCase_ ) - x.ndim) ) , lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any]=0.9_99 , lowerCamelCase_ : Union[str, Any]=jnp.floataa ):
def alpha_bar(lowerCamelCase_ : Any ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
__lowercase = []
for i in range(lowerCamelCase_ ):
__lowercase = i / num_diffusion_timesteps
__lowercase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowerCamelCase_ ) / alpha_bar(lowerCamelCase_ ) , lowerCamelCase_ ) )
return jnp.array(lowerCamelCase_ , dtype=lowerCamelCase_ )
@flax.struct.dataclass
class __lowercase :
'''simple docstring'''
a : jnp.ndarray
a : jnp.ndarray
a : jnp.ndarray
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = scheduler.config
if config.trained_betas is not None:
__lowercase = jnp.asarray(config.trained_betas ,dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
__lowercase = jnp.linspace(config.beta_start ,config.beta_end ,config.num_train_timesteps ,dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowercase = (
jnp.linspace(
config.beta_start**0.5 ,config.beta_end**0.5 ,config.num_train_timesteps ,dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowercase = betas_for_alpha_bar(config.num_train_timesteps ,dtype=scheduler.dtype )
else:
raise NotImplementedError(
f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}" )
__lowercase = 1.0 - betas
__lowercase = jnp.cumprod(_lowerCamelCase ,axis=0 )
return cls(
alphas=_lowerCamelCase ,betas=_lowerCamelCase ,alphas_cumprod=_lowerCamelCase ,)
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
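# A minimal sketch (added for illustration; not part of the original module)
# showing how the helpers above compose. The scheduler stand-in below is a
# hypothetical stub with just the attributes `CommonSchedulerState.create`
# reads; real schedulers come from the surrounding library.
def _demo_common_scheduler_state() -> None:
    import types

    scheduler = types.SimpleNamespace(
        dtype=jnp.float32,
        config=types.SimpleNamespace(
            trained_betas=None, beta_schedule="linear", beta_start=0.0001, beta_end=0.02, num_train_timesteps=10
        ),
    )
    state = CommonSchedulerState.create(scheduler)
    sample = jnp.zeros((2, 3, 4, 4))
    noise = jnp.ones_like(sample)
    timesteps = jnp.array([0, 9])
    noisy = add_noise_common(state, sample, noise, timesteps)  # forward diffusion q(x_t | x_0)
    assert noisy.shape == sample.shape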
| 710 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic(self) -> float:
        """Heuristic for the search: Manhattan (HEURISTIC == 1) or Euclidean distance to the goal."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)
    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False
    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]
    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor nodes in the four cardinal directions."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors
    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Retrace the path from the given node back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False
    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Joins the forward path with the reversed backward path at the meeting node."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
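# A small illustrative check, added for clarity and not part of the original
# script: it shows how the two heuristics of `Node` differ for the same
# start/goal pair. The coordinates are arbitrary example values.
def _demo_heuristics() -> None:
    node = Node(pos_x=0, pos_y=0, goal_x=3, goal_y=4, g_cost=0, parent=None)
    assert node.calculate_heuristic() == 5.0  # Euclidean, since HEURISTIC == 0
    # With HEURISTIC == 1 the same node would report |0 - 3| + |0 - 4| == 7.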
| 56 | 0 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path: str, big_bird_config_file: str, pytorch_dump_path: str, is_trivia_qa: bool
):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
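# Illustrative invocation (added for clarity; the script filename and all paths
# below are placeholder examples, not values shipped with the script):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path   /path/to/bigbird/model.ckpt \
#       --big_bird_config_file /path/to/config.json \
#       --pytorch_dump_path    /path/to/output \
#       --is_trivia_qa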
| 647 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 647 | 1 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
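# A minimal sketch (added for illustration, not part of the original script)
# of the ref-file format `add_chinese_references` expects: one JSON list of
# sub-token indices per line, aligned line-by-line with the dataset rows.
# The indices below are hypothetical whole-word boundaries.
def _demo_add_chinese_references(tmp_path="/tmp/refs.txt"):
    dataset = Dataset.from_dict({"text": ["中国人", "你好"]})
    with open(tmp_path, "w", encoding="utf-8") as f:
        f.write("[2]\n[1, 2]\n")
    return add_chinese_references(dataset, tmp_path)  # adds a "chinese_ref" column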
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
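# Illustrative invocation (added for clarity; paths and hyper-parameters are
# placeholder examples, not defaults shipped with the script):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file /path/to/train.txt \
#       --train_ref_file /path/to/train_refs.txt \
#       --do_train \
#       --output_dir /tmp/mlm-wwm-out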
| 712 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check whether the model contains any `bnb.nn.Linear4bit` layers."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
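# A minimal usage sketch (added for illustration; the checkpoint path is a
# placeholder assumption, and a CUDA GPU plus `bitsandbytes` are required at
# runtime). It follows the intended flow: build an empty-weight model, then
# let `load_and_quantize_model` replace linear layers and load the weights.
def _demo_load_and_quantize_model():
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig
    from transformers import AutoConfig, AutoModelForCausalLM

    config = AutoConfig.from_pretrained("bigscience/bloom-560m")
    with init_empty_weights():
        empty_model = AutoModelForCausalLM.from_config(config)

    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    return load_and_quantize_model(
        empty_model,
        bnb_quantization_config=bnb_config,
        weights_location="/path/to/bloom-560m/checkpoint",  # placeholder path
        device_map="auto",
    )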
| 198 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
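# A minimal usage sketch (added for illustration; the test body is a
# hypothetical example, not one of the project's real tests). Requesting the
# fixture both creates the private repo and redirects the hub endpoints:
def _demo_test_private_text_repo(hf_private_dataset_repo_txt_data, hf_token):
    from datasets import load_dataset

    ds = load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
    assert "train" in ds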
| 392 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    r"""
    Constructs a TVLT processor which wraps a TVLT image processor and a TVLT feature extractor into a single
    processor.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
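# A minimal usage sketch (added for illustration; the checkpoint name, frame
# count, and sampling rate are assumed example values, not requirements from
# the original file):
def _demo_tvlt_processor() -> None:
    import numpy as np
    from transformers import TvltProcessor

    processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
    video = list(np.random.rand(8, 3, 224, 224))  # 8 frames of 224x224 RGB
    audio = np.random.rand(10_000)                # mono waveform
    inputs = processor(images=video, audio=audio, sampling_rate=44_100)
    print(sorted(inputs.keys()))  # merged image + audio model inputs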
| 392 | 1 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 706 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
def a__ ( self : Any , A_ : Optional[Any] , A_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = text_generator.model
lowerCamelCase_ = text_generator.tokenizer
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = pipeline(task='text-generation' , model=A_ , tokenizer=A_ , return_full_text=A_ )
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCamelCase_ = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_text=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_tensors=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_text=A_ , return_tensors=A_ )
        # An empty prompt is slightly special:
        # it requires a BOS token to exist.
        # Pegasus is a special case: it always appends an EOS token, so it
        # works even without a BOS token.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCamelCase_ = text_generator('' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCamelCase_ = text_generator('' )
if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, so we skip these tests for now.
return
        # We don't care about models with an effectively infinite input range;
        # they already handle long inputs. XGLM, for instance, is skipped because
        # it uses sinusoidal positional embeddings that are resized on the fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
lowerCamelCase_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
            # The "hole" strategy cannot work once max_new_tokens exceeds the model's maximum length
with self.assertRaises(A_ ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
import torch
# Classic `model_kwargs`
lowerCamelCase_ = pipeline(
            model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloat16 )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def a__ ( self : int ) -> str:
"""simple docstring"""
import torch
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.float16 )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
import torch
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.float16 )
pipe('This is a test' , do_sample=A_ , top_p=0.5 )
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'Hello world'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
lowerCamelCase_ = logging.get_logger('transformers.generation.tf_utils' )
else:
lowerCamelCase_ = logging.get_logger('transformers.generation.utils' )
        lowerCamelCase_ = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 , max_new_tokens=1 )
self.assertIn(A_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_new_tokens=1 )
self.assertNotIn(A_ , cl.out )
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 )
self.assertNotIn(A_ , cl.out )
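# Usage sketch (assumption — the model name is a tiny test checkpoint and the
# sizes are illustrative): `handle_long_generation="hole"`, exercised in the
# long-input test above, truncates the prompt from the left so that
# prompt + max_new_tokens still fits within the model's maximum length.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline('text-generation', model='hf-internal-testing/tiny-random-gpt2')
    output = generator('This is a test ' * 500, handle_long_generation='hole', max_new_tokens=5)
    print(output[0]['generated_text'][-80:])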
| 651 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "open-llama"
def __init__( self :Union[str, Any] , __A :Tuple=10_0000 , __A :Dict=4096 , __A :int=1_1008 , __A :Optional[int]=32 , __A :Optional[Any]=32 , __A :Dict="silu" , __A :List[str]=2048 , __A :Dict=0.0_2 , __A :Dict=1E-6 , __A :Union[str, Any]=True , __A :Any=0 , __A :List[Any]=1 , __A :Any=2 , __A :Optional[Any]=False , __A :Tuple=True , __A :Optional[int]=0.1 , __A :Tuple=0.1 , __A :str=True , __A :Union[str, Any]=True , __A :Any=None , **__A :Any , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = rms_norm_eps
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = kwargs.pop(
"""use_memorry_efficient_attention""" , __A )
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_dropout_prob
SCREAMING_SNAKE_CASE__ = use_stable_embedding
SCREAMING_SNAKE_CASE__ = shared_input_output_embedding
SCREAMING_SNAKE_CASE__ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , tie_word_embeddings=__A , **__A , )
def _snake_case ( self :Optional[int] ) -> int:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __A ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
SCREAMING_SNAKE_CASE__ = self.rope_scaling.get("""type""" , __A )
SCREAMING_SNAKE_CASE__ = self.rope_scaling.get("""factor""" , __A )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__A , __A ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' ) | 6 |
'''simple docstring'''
def _lowerCAmelCase ( _UpperCamelCase : int ) -> "list[int]":
"""simple docstring"""
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
_SCREAMING_SNAKE_CASE =[0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
_SCREAMING_SNAKE_CASE =1
if upper_limit > 0:
_SCREAMING_SNAKE_CASE =1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(_UpperCamelCase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
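# Cross-check sketch (not in the original module): the closed form
# C(n) = comb(2n, n) // (n + 1) should agree with the DP table above,
# e.g. catalan_numbers(5) == [1, 1, 2, 5, 14, 42].
def catalan_closed_form(n: int) -> int:
    from math import comb

    return comb(2 * n, n) // (n + 1)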
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
            N = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
| 405 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCamelCase : Optional[Any] = logging.getLogger(__name__)
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
    parser.add_argument(
        "--dataset_name" , type=str , default="wikitext" , help="Name of the training dataset. Explore datasets at: hf.co/datasets." , )
    parser.add_argument(
        "--dataset_config" , type=str , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
    parser.add_argument(
        "--tokenizer_name_or_path" , type=str , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
    parser.add_argument(
        "--shard_size" , type=int , default=1_000 , help="Number of entries to go in a single shard." , )
    parser.add_argument("--split" , type=str , default="train" , choices=["train", "test", "validation"] )
    parser.add_argument(
        "--limit" , default=None , type=int , help="Limit the number of shards (used for debugging)." , )
    parser.add_argument(
        "--max_length" , type=int , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8." , )
    parser.add_argument(
        "--output_dir" , default="tf-tpu" , type=str , help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket." , )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"] )
    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        serialized_example = example.SerializeToString()
        records.append(serialized_example )
    return records
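# Round-trip sketch (assumption: records were written by the functions above,
# so every feature holds exactly `max_length` int64 values after grouping).
def parse_serialized_example(serialized_example, max_length=512):
    feature_spec = {
        "input_ids": tf.io.FixedLenFeature([max_length], tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], tf.int64),
    }
    return tf.io.parse_single_example(serialized_example, feature_spec)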
def main(args):
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F'Limiting the dataset to {args.limit} entries.' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["text"] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1_000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"] )
        filename = os.path.join(split_dir , F'dataset-{shard_count}-{records_containing}.tfrecord' )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("Wrote file {} containing {} records".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F'split-{args.split}-records-count.txt' , "w" ) as f:
        print(F'Total {args.split} records: {total_records}' , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
| 717 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __magic_name__ ( A__ ):
lowercase : "DiagonalGaussianDistribution"
class __magic_name__ ( A__, A__ ):
lowercase : Union[str, Any] =True
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 3 , UpperCamelCase__ : Tuple[str] = ("DownEncoderBlock2D",) , UpperCamelCase__ : Tuple[str] = ("UpDecoderBlock2D",) , UpperCamelCase__ : Tuple[int] = (64,) , UpperCamelCase__ : int = 1 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : int = 4 , UpperCamelCase__ : int = 32 , UpperCamelCase__ : int = 32 , UpperCamelCase__ : float = 0.1_82_15 , ) -> List[str]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
UpperCAmelCase = Encoder(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , down_block_types=UpperCamelCase__ , block_out_channels=UpperCamelCase__ , layers_per_block=UpperCamelCase__ , act_fn=UpperCamelCase__ , norm_num_groups=UpperCamelCase__ , double_z=UpperCamelCase__ , )
# pass init params to Decoder
UpperCAmelCase = Decoder(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , up_block_types=UpperCamelCase__ , block_out_channels=UpperCamelCase__ , layers_per_block=UpperCamelCase__ , norm_num_groups=UpperCamelCase__ , act_fn=UpperCamelCase__ , )
        UpperCAmelCase = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        UpperCAmelCase = nn.Conv2d(UpperCamelCase__ , UpperCamelCase__ , 1 )
UpperCAmelCase = False
UpperCAmelCase = False
# only relevant if vae tiling is enabled
UpperCAmelCase = self.config.sample_size
UpperCAmelCase = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase = 0.25
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=False ) -> Optional[int]:
'''simple docstring'''
if isinstance(UpperCamelCase__ , (Encoder, Decoder) ):
UpperCAmelCase = value
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCamelCase__ : bool = True ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = use_tiling
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
self.enable_tiling(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = True
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
UpperCAmelCase = {}
def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase__ , "set_processor" ):
UpperCAmelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' , UpperCamelCase__ , UpperCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return processors
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(UpperCamelCase__ )} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : List[Any] ):
if hasattr(UpperCamelCase__ , "set_processor" ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
module.set_processor(UpperCamelCase__ )
else:
module.set_processor(processor.pop(F'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' , UpperCamelCase__ , UpperCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Dict:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ) -> AutoencoderKLOutput:
'''simple docstring'''
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(UpperCamelCase__ , return_dict=UpperCamelCase__ )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase = [self.encoder(UpperCamelCase__ ) for x_slice in x.split(1 )]
UpperCAmelCase = torch.cat(UpperCamelCase__ )
else:
UpperCAmelCase = self.encoder(UpperCamelCase__ )
UpperCAmelCase = self.quant_conv(UpperCamelCase__ )
UpperCAmelCase = DiagonalGaussianDistribution(UpperCamelCase__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(UpperCamelCase__ , return_dict=UpperCamelCase__ )
UpperCAmelCase = self.post_quant_conv(UpperCamelCase__ )
UpperCAmelCase = self.decoder(UpperCamelCase__ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__ )
@apply_forward_hook
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase = [self._decode(UpperCamelCase__ ).sample for z_slice in z.split(1 )]
UpperCAmelCase = torch.cat(UpperCamelCase__ )
else:
UpperCAmelCase = self._decode(UpperCamelCase__ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = min(a.shape[2] , b.shape[2] , UpperCamelCase__ )
for y in range(UpperCamelCase__ ):
UpperCAmelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = min(a.shape[3] , b.shape[3] , UpperCamelCase__ )
for x in range(UpperCamelCase__ ):
UpperCAmelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ) -> AutoencoderKLOutput:
'''simple docstring'''
UpperCAmelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase = []
for i in range(0 , x.shape[2] , UpperCamelCase__ ):
UpperCAmelCase = []
for j in range(0 , x.shape[3] , UpperCamelCase__ ):
UpperCAmelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase = self.encoder(UpperCamelCase__ )
UpperCAmelCase = self.quant_conv(UpperCamelCase__ )
row.append(UpperCamelCase__ )
rows.append(UpperCamelCase__ )
UpperCAmelCase = []
for i, row in enumerate(UpperCamelCase__ ):
UpperCAmelCase = []
for j, tile in enumerate(UpperCamelCase__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase = self.blend_v(rows[i - 1][j] , UpperCamelCase__ , UpperCamelCase__ )
if j > 0:
UpperCAmelCase = self.blend_h(row[j - 1] , UpperCamelCase__ , UpperCamelCase__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(UpperCamelCase__ , dim=3 ) )
UpperCAmelCase = torch.cat(UpperCamelCase__ , dim=2 )
UpperCAmelCase = DiagonalGaussianDistribution(UpperCamelCase__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
UpperCAmelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase = []
for i in range(0 , z.shape[2] , UpperCamelCase__ ):
UpperCAmelCase = []
for j in range(0 , z.shape[3] , UpperCamelCase__ ):
UpperCAmelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase = self.post_quant_conv(UpperCamelCase__ )
UpperCAmelCase = self.decoder(UpperCamelCase__ )
row.append(UpperCamelCase__ )
rows.append(UpperCamelCase__ )
UpperCAmelCase = []
for i, row in enumerate(UpperCamelCase__ ):
UpperCAmelCase = []
for j, tile in enumerate(UpperCamelCase__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase = self.blend_v(rows[i - 1][j] , UpperCamelCase__ , UpperCamelCase__ )
if j > 0:
UpperCAmelCase = self.blend_h(row[j - 1] , UpperCamelCase__ , UpperCamelCase__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(UpperCamelCase__ , dim=3 ) )
UpperCAmelCase = torch.cat(UpperCamelCase__ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
UpperCAmelCase = sample
UpperCAmelCase = self.encode(UpperCamelCase__ ).latent_dist
if sample_posterior:
UpperCAmelCase = posterior.sample(generator=UpperCamelCase__ )
else:
UpperCAmelCase = posterior.mode()
UpperCAmelCase = self.decode(UpperCamelCase__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__ )
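# Standalone sketch (assumption): the blend_v/blend_h methods above apply a
# linear cross-fade over `blend_extent` rows (resp. columns); for
# blend_extent = 4 the weights on tile `b` are [0.00, 0.25, 0.50, 0.75].
def _demo_vertical_blend(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    blend_extent = min(a.shape[2], b.shape[2], blend_extent)
    for y in range(blend_extent):
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
    return b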
| 457 | 0 |
'''simple docstring'''
from __future__ import annotations
def shear_stress( stress : float , tangential_force : float , area : float , ):
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
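def _demo_shear_stress() -> None:
    """Illustrative check (assumed values): pass 0 for the unknown quantity."""
    # area = tangential_force / stress = 100 / 25
    assert shear_stress(stress=25, tangential_force=100, area=0) == ("area", 4.0)
    # stress = tangential_force / area = 1600 / 200
    assert shear_stress(stress=0, tangential_force=1600, area=200) == ("stress", 8.0)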
if __name__ == "__main__":
import doctest
doctest.testmod() | 44 |
from math import factorial
def binomial_distribution( successes : int , trials : int , prob : float ):
    if successes > trials:
        raise ValueError('successes must be less than or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('the function is defined for non-negative integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in the exclusive range (0, 1)' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
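def _binomial_cross_check( successes : int , trials : int , prob : float ):
    # Sanity-check sketch (not in the original module): the same probability
    # computed via math.comb instead of explicit factorials.
    from math import comb

    return comb(trials , successes ) * (prob**successes) * ((1 - prob) ** (trials - successes))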
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 335 | 0 |
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="attention" ):
_UpperCamelCase = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
_UpperCamelCase = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
_UpperCamelCase = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
_UpperCamelCase = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
if split_mlp_wi:
_UpperCamelCase = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
_UpperCamelCase = params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
_UpperCamelCase = (wi_a, wi_a)
else:
_UpperCamelCase = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
_UpperCamelCase = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
return params[f'''{prefix}/layers_{i}/{layer_name}/scale''']
def SCREAMING_SNAKE_CASE ( lowerCAmelCase , *, lowerCAmelCase , lowerCAmelCase ):
_UpperCamelCase = traverse_util.flatten_dict(variables['''target'''] )
_UpperCamelCase = {'''/'''.join(lowerCAmelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_UpperCamelCase = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , lowerCAmelCase )
_UpperCamelCase = collections.OrderedDict()
# Shared embeddings.
_UpperCamelCase = old['''token_embedder/embedding''']
# Encoder.
for i in range(lowerCAmelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , '''encoder''' , '''pre_attention_layer_norm''' )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = tax_attention_lookup(lowerCAmelCase , lowerCAmelCase , '''encoder''' , '''attention''' )
_UpperCamelCase = layer_norm
_UpperCamelCase = k.T
_UpperCamelCase = o.T
_UpperCamelCase = q.T
_UpperCamelCase = v.T
# Block i, layer 1 (MLP).
_UpperCamelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , '''encoder''' , '''pre_mlp_layer_norm''' )
_UpperCamelCase , _UpperCamelCase = tax_mlp_lookup(lowerCAmelCase , lowerCAmelCase , '''encoder''' , lowerCAmelCase )
_UpperCamelCase = layer_norm
if split_mlp_wi:
_UpperCamelCase = wi[0].T
_UpperCamelCase = wi[1].T
else:
_UpperCamelCase = wi.T
_UpperCamelCase = wo.T
_UpperCamelCase = old[
'''encoder/relpos_bias/rel_embedding'''
].T
_UpperCamelCase = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , '''decoder''' , '''pre_self_attention_layer_norm''' )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = tax_attention_lookup(lowerCAmelCase , lowerCAmelCase , '''decoder''' , '''self_attention''' )
_UpperCamelCase = layer_norm
_UpperCamelCase = k.T
_UpperCamelCase = o.T
_UpperCamelCase = q.T
_UpperCamelCase = v.T
# Block i, layer 1 (Cross Attention).
_UpperCamelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , '''decoder''' , '''pre_cross_attention_layer_norm''' )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = tax_attention_lookup(lowerCAmelCase , lowerCAmelCase , '''decoder''' , '''encoder_decoder_attention''' )
_UpperCamelCase = layer_norm
_UpperCamelCase = k.T
_UpperCamelCase = o.T
_UpperCamelCase = q.T
_UpperCamelCase = v.T
# Block i, layer 2 (MLP).
_UpperCamelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , '''decoder''' , '''pre_mlp_layer_norm''' )
_UpperCamelCase , _UpperCamelCase = tax_mlp_lookup(lowerCAmelCase , lowerCAmelCase , '''decoder''' , lowerCAmelCase )
_UpperCamelCase = layer_norm
if split_mlp_wi:
_UpperCamelCase = wi[0].T
_UpperCamelCase = wi[1].T
else:
_UpperCamelCase = wi.T
_UpperCamelCase = wo.T
_UpperCamelCase = old['''decoder/decoder_norm/scale''']
_UpperCamelCase = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_UpperCamelCase = old['''decoder/logits_dense/kernel'''].T
return new
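# Context sketch (assumption about the T5 v1.1 architecture): the wi_0/wi_1
# pair loaded above feeds a gated-GELU MLP, roughly
#   h = gelu(x @ wi_0) * (x @ wi_1);  y = h @ wo
# which is why `split_mlp_wi` checkpoints store two input projections.
def _gated_gelu_mlp(x, wi_0, wi_1, wo):
    import torch.nn.functional as F

    return (F.gelu(x @ wi_0) * (x @ wi_1)) @ wo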
def SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase ):
_UpperCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
_UpperCamelCase = state_dict['''shared.weight''']
return state_dict
def SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
    _UpperCamelCase = checkpoints.load_t5x_checkpoint(lowerCAmelCase )
_UpperCamelCase = convert_tax_to_pytorch(lowerCAmelCase , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase )
_UpperCamelCase = make_state_dict(lowerCAmelCase , lowerCAmelCase )
model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = False ):
    _UpperCamelCase = T5Config.from_json_file(lowerCAmelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
        _UpperCamelCase = T5EncoderModel(lowerCAmelCase )
else:
        _UpperCamelCase = T5ForConditionalGeneration(lowerCAmelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase )
print('''Done''' )
if __name__ == "__main__":
lowercase : Union[str, Any] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
lowercase : str = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 105 |
def solution(n: int = 1_000 ) -> int:
    # Sum the multiples of 3 or 5 below n; a multiple of both (i.e. of 15)
    # satisfies the `or` check once, so it is counted exactly once.
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
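def _closed_form_cross_check(n: int = 1_000 ) -> int:
    # Cross-check sketch (not in the original file): inclusion-exclusion with
    # the arithmetic-series sum of the multiples of k below n.
    def sum_of_multiples(k: int ) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return sum_of_multiples(3 ) + sum_of_multiples(5 ) - sum_of_multiples(15 )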
if __name__ == "__main__":
print(F'''{solution() = }''')
| 105 | 1 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __A :
def _lowercase (self : Optional[int] , __a : int ):
raise NotImplementedError()
def _lowercase (self : Union[str, Any] ):
raise NotImplementedError()
class __A ( UpperCamelCase__ ):
def __init__(self : Union[str, Any] , __a : "AutoTokenizer" , __a : bool = False , **__a : List[str] ):
UpperCAmelCase_ = tokenizer
UpperCAmelCase_ = skip_prompt
UpperCAmelCase_ = decode_kwargs
# variables used in the streaming process
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
UpperCAmelCase_ = True
def _lowercase (self : int , __a : Any ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
UpperCAmelCase_ = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
UpperCAmelCase_ = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
UpperCAmelCase_ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
UpperCAmelCase_ = text[self.print_len :]
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
# If the last token is a CJK character, we print the characters.
elif len(a_ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
UpperCAmelCase_ = text[self.print_len :]
self.print_len += len(a_ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
UpperCAmelCase_ = text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(a_ )
self.on_finalized_text(a_ )
def _lowercase (self : Dict ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
UpperCAmelCase_ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
UpperCAmelCase_ = text[self.print_len :]
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
else:
UpperCAmelCase_ = ''
UpperCAmelCase_ = True
self.on_finalized_text(a_ , stream_end=a_ )
def _lowercase (self : Optional[Any] , __a : str , __a : bool = False ):
print(a_ , flush=a_ , end="" if not stream_end else None )
def _lowercase (self : List[str] , __a : Union[str, Any] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as are Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all of the other languages.
if (
(cp >= 0X4e_00 and cp <= 0X9f_ff)
or (cp >= 0X34_00 and cp <= 0X4d_bf) #
or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) #
or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) #
or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) #
or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) #
or (cp >= 0Xf9_00 and cp <= 0Xfa_ff)
or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) #
): #
return True
return False
class __A ( UpperCamelCase__ ):
def __init__(self : int , __a : "AutoTokenizer" , __a : bool = False , __a : Optional[float] = None , **__a : int ):
super().__init__(a_ , a_ , **a_ )
UpperCAmelCase_ = Queue()
UpperCAmelCase_ = None
UpperCAmelCase_ = timeout
def _lowercase (self : int , __a : str , __a : bool = False ):
self.text_queue.put(a_ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__(self : str ):
return self
def _lowercase (self : str ):
UpperCAmelCase_ = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
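if __name__ == "__main__":
    # Usage sketch (assumption: the classes above mirror transformers'
    # TextStreamer / TextIteratorStreamer, so the public API below applies).
    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tok = AutoTokenizer.from_pretrained("gpt2" )
    model = AutoModelForCausalLM.from_pretrained("gpt2" )
    inputs = tok(["An example prompt"] , return_tensors="pt" )
    streamer = TextIteratorStreamer(tok , skip_prompt=True )
    thread = Thread(target=model.generate , kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20} )
    thread.start()
    for chunk in streamer:
        print(chunk , end="" )
    thread.join()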
| 78 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = 'base_with_context'
def a_ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
"""simple docstring"""
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
snake_case: Tuple =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
snake_case: Dict =weights[f'''layers_{lyr_num}''']
snake_case: str =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
snake_case: Any =ly_weight['attention']
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: str =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
snake_case: List[Any] =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
snake_case: Any =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def a_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
snake_case: Dict =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
snake_case: List[Any] =weights[f'''layers_{lyr_num}''']
snake_case: Tuple =ly_weight['attention']
snake_case: str =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: int =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
snake_case: Tuple =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
snake_case: Any =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
snake_case: List[str] =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
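# Context sketch (assumption): the FiLMLayer_* weights loaded in the decoder
# below implement feature-wise linear modulation — a conditioning embedding is
# projected to a per-channel (scale, shift) pair applied as y = x * (1 + scale) + shift.
def _film(x, scale, shift):
    return x * (1 + scale) + shift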
def a_ ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
"""simple docstring"""
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
snake_case: Tuple =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__UpperCAmelCase )
snake_case: Any =nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
snake_case: List[str] =weights[f'''layers_{lyr_num}''']
snake_case: Any =nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
snake_case: int =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
snake_case: str =ly_weight['self_attention']
snake_case: str =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: List[str] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
snake_case: Optional[Any] =ly_weight['MultiHeadDotProductAttention_0']
snake_case: int =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: List[str] =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
snake_case: Any =nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
snake_case: int =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
snake_case: Union[str, Any] =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
snake_case: int =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
snake_case: int =nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def a_ ( __UpperCAmelCase ) -> Dict:
"""simple docstring"""
    snake_case: Union[str, Any] =checkpoints.load_t5x_checkpoint(args.checkpoint_path )
snake_case: Tuple =jnp.tree_util.tree_map(onp.array , __UpperCAmelCase )
snake_case: str =[
'from __gin__ import dynamic_registration',
'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
]
snake_case: List[Any] =os.path.join(args.checkpoint_path , '..' , 'config.gin' )
snake_case: Optional[Any] =inference.parse_training_gin_file(__UpperCAmelCase , __UpperCAmelCase )
snake_case: List[str] =inference.InferenceModel(args.checkpoint_path , __UpperCAmelCase )
snake_case: List[Any] =DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
snake_case: Optional[Any] =SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
snake_case: Optional[Any] =SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    snake_case: List[Any] =T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
snake_case: Optional[Any] =load_notes_encoder(ta_checkpoint['target']['token_encoder'] , __UpperCAmelCase )
snake_case: Optional[Any] =load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , __UpperCAmelCase )
snake_case: Union[str, Any] =load_decoder(ta_checkpoint['target']['decoder'] , __UpperCAmelCase )
snake_case: int =OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
snake_case: Optional[Any] =SpectrogramDiffusionPipeline(
notes_encoder=__UpperCAmelCase , continuous_encoder=__UpperCAmelCase , decoder=__UpperCAmelCase , scheduler=__UpperCAmelCase , melgan=__UpperCAmelCase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
main(args)
| 350 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
_SCREAMING_SNAKE_CASE = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
_SCREAMING_SNAKE_CASE = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    '''simple docstring'''
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
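# Worked example (illustrative): candidate merges for the symbol tuple of
# "hello"; the `bpe` method below repeatedly merges the pair with the best
# rank in self.bpe_ranks until no ranked pair remains.
assert get_pairs(tuple("hello" ) ) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}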
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = ["input_ids", "attention_mask"]
def __init__( self : Any , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int]="replace" , __lowerCamelCase : str="<s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Optional[int]="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Any="<unk>" , __lowerCamelCase : Optional[int]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : int=False , **__lowerCamelCase : Dict , ) -> Tuple:
SCREAMING_SNAKE_CASE__ = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
SCREAMING_SNAKE_CASE__ = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
SCREAMING_SNAKE_CASE__ = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
SCREAMING_SNAKE_CASE__ = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
SCREAMING_SNAKE_CASE__ = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
SCREAMING_SNAKE_CASE__ = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
SCREAMING_SNAKE_CASE__ = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE__ = json.load(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE__ = bytes_to_unicode()
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE__ = merges_handle.read().split('''\n''' )[1:-1]
SCREAMING_SNAKE_CASE__ = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE__ = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE__ = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def lowercase_ ( self : Any ) -> Union[str, Any]:
return len(self.encoder )
def lowercase_ ( self : Any ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self : List[str] , __lowerCamelCase : int ) -> Any:
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE__ = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = bigram
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE__ = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = ''' '''.join(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = word
return word
def lowercase_ ( self : str , __lowerCamelCase : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = []
for token in re.findall(self.pat , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(''' ''' ) )
return bpe_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=token_ids_a , token_ids_b=token_ids_b , already_has_special_tokens=already_has_special_tokens )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
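# --- Editor's sketch (not part of the original file): the merge loop in `bpe` above,
# reduced to a self-contained toy. The merge table below is invented purely for
# illustration; real rank tables come from the merges file loaded in __init__.
def _toy_bpe(token, bpe_ranks):
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        # Lowest rank = earliest-learned (most frequent) merge.
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)

if __name__ == "__main__":
    # "l"+"o" merges first, then "lo"+"w": "lower" -> "low e r"
    assert _toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}) == "low e r"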
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_roberta_fast'''] = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roberta'''] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_roberta'''] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_roberta'''] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
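# --- Editor's sketch (not part of the original file): the lazy-import idea behind
# `_LazyModule`, stripped to its core. Only the name -> submodule mapping from
# `_import_structure` matters; everything else here is a simplifying assumption.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: [objects]} into {object: submodule}.
        self._object_to_module = {
            obj: sub for sub, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr):
        sub = self._object_to_module.get(attr)
        if sub is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # The heavy import happens only now, on first attribute access.
        module = importlib.import_module(f"{self.__name__}.{sub}")
        return getattr(module, attr)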
| 472 | 0 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig( datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
def _generate_iterable_examples( df , partition_order , ):
    import pyspark
    def generate_fn():
        df_with_partition_id = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('''*''' ).where(f"part_id = {partition_id}" ).drop('''part_id''' )
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1
    return generate_fn
class SparkExamplesIterable( _BaseExamplesIterable ):
    def __init__( self , df , partition_order=None , ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
    def __iter__( self ):
        yield from self.generate_examples_fn()
    def shuffle_data_sources( self , generator ):
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    def shard_data_sources( self , worker_id , num_workers ):
        partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    @property
    def n_shards( self ):
        return len(self.partition_order )
class Spark( datasets.DatasetBuilder ):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__( self , df , cache_dir=None , working_dir=None , **kwargs , ):
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **kwargs , )
    def _validate_cache_dir( self ):
        # Returns the path of the created file.
        def create_cache_and_write_probe( context ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=True )
            probe_file = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuid4().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file , '''a''' )
            return [probe_file]
        if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
    def _info( self ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self , max_shard_size ):
        import pyspark
        def get_arrow_batch_size( it ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size , '''batch_bytes: long''' )
            .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
    def _prepare_split_single( self , fpath , file_format , max_shard_size , ):
        import pyspark
        writer_class = ParquetWriter if file_format == '''parquet''' else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == '''parquet'''
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow( it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it , None )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
            shard_id = 0
            writer = writer_class(
                features=features , path=working_fpath.replace('''SSSSS''' , f"{shard_id:05d}" ).replace('''TTTTT''' , f"{task_id:05d}" ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples , num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features , path=working_fpath.replace('''SSSSS''' , f"{shard_id:05d}" ).replace('''TTTTT''' , f"{task_id:05d}" ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
                table = pa.Table.from_batches([batch] )
                writer.write_table(table )
            if writer._num_bytes > 0:
                num_examples , num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath ) ):
                    dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
                    shutil.move(file , dest )
        stats = (
            self.df.mapInArrow(write_arrow , '''task_id: long, num_examples: long, num_bytes: long''' )
            .groupBy('''task_id''' )
            .agg(
                pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split( self , split_generator , file_format = "arrow" , max_shard_size = None , num_proc = None , **kwargs , ):
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(max_shard_size )
        is_local = not is_remote_filesystem(self._fs )
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = '''-TTTTT-SSSSS-of-NNNNN'''
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir , fname )
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards." )
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs
            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id , shard_id , global_shard_id , ):
                rename(
                    fs , fpath.replace('''SSSSS''' , f"{shard_id:05d}" ).replace('''TTTTT''' , f"{task_id:05d}" ) , fpath.replace('''TTTTT-SSSSS''' , f"{global_shard_id:05d}" ).replace('''NNNNN''' , f"{total_shards:05d}" ) , )
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards ) ):
                task_id , num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace('''SSSSS''' , f"{shard_id:05d}" ).replace('''TTTTT''' , f"{task_id:05d}" ) , fpath.replace(SUFFIX , '''''' ) , )
    def _get_examples_iterable_for_split( self , split_generator , ):
        return SparkExamplesIterable(self.df )
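# --- Editor's sketch (not part of the original file): the shard-count arithmetic
# used by `_repartition_df_if_needed`, in isolation. The byte figures in the check
# below are made up for illustration, and the single-shard branch is a simplification.
def _estimate_num_partitions(df_num_rows, approx_bytes_per_row, max_shard_size):
    approx_total_size = approx_bytes_per_row * df_num_rows
    if approx_total_size <= max_shard_size:
        return 1  # fits in a single shard; the builder simply skips repartitioning
    # Never request more partitions than rows (at least one row per partition).
    return min(df_num_rows, int(approx_total_size / max_shard_size))

if __name__ == "__main__":
    # 1M rows at ~2 KB each is ~2 GB; 2 GB / 500 MiB is about 3.9, truncated to 3.
    assert _estimate_num_partitions(1_000_000, 2_048, 500 * 1024 * 1024) == 3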
| 29 |
def heaps(arr: list ):
    """simple docstring"""
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(k: int , arr: list ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[k - 1] , arr[i] = arr[i] , arr[k - 1]
            else:  # k is odd
                arr[k - 1] , arr[0] = arr[0] , arr[k - 1]
            generate(k - 1 , arr )
    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
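# --- Editor's note (not part of the original file): Heap's algorithm emits all
# n! orderings; for a three-element list the six permutations come out as:
# >>> heaps([1, 2, 3])
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]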
| 556 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xe000
SEP = 0xe001
BOS = 0xe002
MASK = 0xe003
RESERVED = 0xe004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer( PreTrainedTokenizer ):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=2048 , **kwargs , ):
"""simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
    @property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return self._unicode_vocab_size
    def _tokenize( self , text: str ):
        """simple docstring"""
        return list(text )
    def _convert_token_to_id( self , token: str ) -> int:
        """simple docstring"""
        try:
            return ord(token )
        except TypeError:
            raise ValueError(F'''invalid token: \'{token}\'''')
    def _convert_id_to_token( self , index: int ) -> str:
        """simple docstring"""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(F'''invalid id: {index}''')
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        return "".join(tokens )
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_a + sep
        if token_ids_b is not None:
            result += token_ids_b + sep
        return result
    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=token_ids_a , token_ids_b=token_ids_b , already_has_special_tokens=already_has_special_tokens )
        result = [1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_b is not None:
            result += ([0] * len(token_ids_b )) + [1]
        return result
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_a + sep ) * [0]
        if token_ids_b is not None:
            result += len(token_ids_b + sep ) * [1]
        return result
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        return ()
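# --- Editor's sketch (not part of the original file): CANINE has no learned
# vocabulary, so token <-> id conversion is just an ord()/chr() round-trip,
# with the private-use codepoints above reserved for special symbols.
if __name__ == "__main__":
    text = "héllo"
    ids = [ord(ch) for ch in text]      # per-character _convert_token_to_id
    assert ids == [104, 233, 108, 108, 111]
    assert "".join(chr(i) for i in ids) == text  # _convert_id_to_token inverse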
| 717 |
"""simple docstring"""
def gcd(a: int , b: int ) -> int:
    '''simple docstring'''
    while a != 0:
        a , b = b % a, a
    return b
def find_mod_inverse(a: int , m: int ) -> int:
    '''simple docstring'''
    if gcd(a , m ) != 1:
        msg = F'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg )
    u1 , u2 , u3 = 1, 0, a
    v1 , v2 , v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1 , v2 , v3 , u1 , u2 , u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
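# --- Editor's sketch (not part of the original file): a quick sanity check of the
# extended-Euclid routine above (using the `find_mod_inverse` name restored in this
# edit). 3 * 4 = 12 ≡ 1 (mod 11), so the inverse of 3 modulo 11 is 4.
if __name__ == "__main__":
    assert find_mod_inverse(3, 11) == 4
    assert (3 * find_mod_inverse(3, 11)) % 11 == 1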
| 100 | 0 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowercase__ : List[Any] = logging.getLogger()
def _dump_articles( path: Path, articles: list ) -> None:
    """simple docstring"""
    content = '''\n'''.join(articles )
    Path(path ).open('''w''' ).writelines(content )
lowercase__ : str = """patrickvonplaten/t5-tiny-random"""
lowercase__ : Tuple = """sshleifer/bart-tiny-random"""
lowercase__ : Tuple = """sshleifer/tiny-mbart"""
lowercase__ : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest( TestCasePlus ):
    """simple docstring"""
    def run_eval_tester( self , model ):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
        output_file_name = input_file_name.parent / '''utest_output.txt'''
        assert not output_file_name.exists()
        articles = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
        _dump_articles(input_file_name , articles )
        score_path = str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' )
        task = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
        testargs = f"""\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n """.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_generate()
        assert Path(output_file_name ).exists()
        # os.remove(Path(output_file_name))
    def test_run_eval( self ):
        '''simple docstring'''
        self.run_eval_tester(T5_TINY )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def test_run_eval_slow( self , model ):
        '''simple docstring'''
        self.run_eval_tester(model )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def test_run_eval_search( self , model ):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
        output_file_name = input_file_name.parent / '''utest_output.txt'''
        assert not output_file_name.exists()
        text = {
            '''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
            '''de''': [
                '''Maschinelles Lernen ist großartig, oder?''',
                '''Ich esse gerne Bananen''',
                '''Morgen ist wieder ein toller Tag!''',
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / '''scores.json''' )
        reference_path = str(tmp_dir / '''val.target''' )
        _dump_articles(input_file_name , text['''en'''] )
        _dump_articles(reference_path , text['''de'''] )
        task = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
        testargs = f"""\n run_eval_search.py\n {model}\n {str(input_file_name )}\n {str(output_file_name )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n """.split()
        testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] )
        with patch.object(sys , '''argv''' , testargs ):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [''' num_beams | length_penalty''', model, '''Best score args''']
            un_expected_strings = ['''Info''']
            if "translation" in task:
                expected_strings.append('''bleu''' )
            else:
                expected_strings.extend(ROUGE_KEYS )
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name ).exists()
            os.remove(Path(output_file_name ) )
| 98 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester( unittest.TestCase):
    """simple docstring"""
    def test_accelerated_optimizer_pickling( self ):
        """simple docstring"""
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}" )
        AcceleratorState._reset_state()
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D( nn.Module ):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        """simple docstring"""
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , hidden_states ):
        """simple docstring"""
        batch , height , width , channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxDownsample2D( nn.Module ):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        """simple docstring"""
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , hidden_states ):
        """simple docstring"""
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxResnetBlock2D( nn.Module ):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        """simple docstring"""
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv1 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        self.time_emb_proj = nn.Dense(out_channels , dtype=self.dtype )
        self.norm2 = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.dropout = nn.Dropout(self.dropout_prob )
        self.conv2 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
    def __call__( self , hidden_states , temb , deterministic=True ):
        """simple docstring"""
        residual = hidden_states
        hidden_states = self.norm1(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.conv1(hidden_states )
        temb = self.time_emb_proj(nn.swish(temb ) )
        temb = jnp.expand_dims(jnp.expand_dims(temb , 1 ) , 1 )
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.dropout(hidden_states , deterministic )
        hidden_states = self.conv2(hidden_states )
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual )
        return hidden_states + residual
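# --- Editor's sketch (not part of the original file): initializing and applying one
# of the blocks above. The class name follows the diffusers-style names restored in
# this edit; shapes are NHWC, as the resize call implies, and the sizes are invented.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxUpsample2D(out_channels=8)
    x = jnp.ones((1, 16, 16, 8))
    params = block.init(rng, x)
    y = block.apply(params, x)
    assert y.shape == (1, 32, 32, 8)  # spatial dims doubled by the nearest resize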
| 464 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tapas'] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_tapas'] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 464 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(len(vocab_keys ) , 1008 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
    def test_full_tokenizer( self ):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer( self ):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
    def test_picklable_without_disk( self ):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 3_1227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
        symbols = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration( self ):
        expected_encoding = {
"input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="facebook/xglm-564M" , padding=False , )
| 311 | '''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption( parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile('''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile('''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(R'''^\s*else:''')
def find_backend( line ):
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init( fname ):
    with open(fname ,'r' ,encoding='utf-8' ,newline='\n' ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith('_import_structure = {' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall('\[([^\]]+)\]' ,content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(' ' * 8 + '"' ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(' ' * 8 + '"' ):
                    objects.append(line[9:-3] )
                elif line.startswith(' ' * 12 + '"' ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('else' )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', ' ) )
        elif line.startswith(' ' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects ,type_hint_objects ):
    def find_duplicates( seq ):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = 'base imports' if key == 'none' else F'{key} backend'
            errors.append(F'Differences for {name}:' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'  {a} in TYPE_HINT but not in _import_structure.' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'  {a} in _import_structure but not in TYPE_HINT.' )
    return errors
def check_all_inits( ):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root ,'__init__.py' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append('\n'.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('\n\n'.join(failures ) )
def get_transformers_submodules( ):
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('*.py' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep ,'.' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace('.py' ,'' ).replace(os.path.sep ,'.' )
            if len(submodule.split('.' ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def check_submodules( ):
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        'transformers' ,os.path.join(PATH_TO_TRANSFORMERS ,'__init__.py' ) ,submodule_search_locations=[PATH_TO_TRANSFORMERS] ,)
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = '\n'.join(F'- {module}' for module in module_not_registered )
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F'{list_of_modules}\n'
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
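# --- Editor's sketch (not part of the original file): what find_backend extracts
# from representative init lines; guard lines map to the backend name, anything
# else maps to None.
if __name__ == "__main__":
    assert find_backend("if not is_torch_available():") == "torch"
    assert find_backend("x = 1") is None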
| 720 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
A_ :int = logging.get_logger(__name__)
class __A :
"""simple docstring"""
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__=None , lowerCamelCase__=None ):
"""simple docstring"""
if not conversation_id:
__UpperCamelCase : int =uuid.uuida()
if past_user_inputs is None:
__UpperCamelCase : int =[]
if generated_responses is None:
__UpperCamelCase : Union[str, Any] =[]
__UpperCamelCase : uuid.UUID =conversation_id
__UpperCamelCase : List[str] =past_user_inputs
__UpperCamelCase : List[str] =generated_responses
__UpperCamelCase : Optional[str] =text
def __eq__( self , lowerCamelCase__ ):
"""simple docstring"""
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".' )
__UpperCamelCase : Any =text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
__UpperCamelCase : Optional[int] =text
def __lowercase ( self ):
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__UpperCamelCase : List[Any] =None
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
self.generated_responses.append(lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
"""simple docstring"""
__UpperCamelCase : Any =f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
__UpperCamelCase : Tuple ='user' if is_user else 'bot'
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
a , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class __A ( a ):
"""simple docstring"""
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
if self.tokenizer.pad_token_id is None:
__UpperCamelCase : int =self.tokenizer.eos_token
def __lowercase ( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : int ={}
__UpperCamelCase : Tuple ={}
__UpperCamelCase : Union[str, Any] ={}
if min_length_for_response is not None:
__UpperCamelCase : Union[str, Any] =min_length_for_response
if minimum_tokens is not None:
__UpperCamelCase : Tuple =minimum_tokens
if "max_length" in generate_kwargs:
__UpperCamelCase : Optional[Any] =generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__UpperCamelCase : Any =clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowerCamelCase__ )
return preprocess_params, forward_params, postprocess_params
    def __call__( self , conversations , num_workers=0 , **kwargs ):
        """simple docstring"""
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
    def preprocess( self , conversation , min_length_for_response=32 ):
        """simple docstring"""
        if not isinstance(conversation , Conversation ):
            raise ValueError('ConversationalPipeline expects a Conversation as input' )
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
                'Add user inputs with the conversation\'s `add_user_input` method' )
        if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward( self , model_inputs , minimum_tokens=10 , **generate_kwargs ):
        """simple docstring"""
        max_length = generate_kwargs.get('max_length' , self.model.config.max_length )
        n = model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
            trim = max_length - minimum_tokens
            model_inputs['input_ids'] = model_inputs['input_ids'][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['attention_mask'] = model_inputs['attention_mask'][:, -trim:]
        conversation = model_inputs.pop('conversation' )
        generate_kwargs['max_length'] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess( self , model_outputs , clean_up_tokenization_spaces=True ):
        """simple docstring"""
        output_ids = model_outputs['output_ids']
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation
    def _legacy_parse_and_tokenize( self , conversation ):
        """simple docstring"""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
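    # --- Hedged usage sketch (editor's addition, not part of the original file). ---
    # The checkpoint below is illustrative; the `pipeline("conversational", ...)`
    # entry point and the `Conversation` class are assumed from the public
    # transformers API of this era:
    #
    #   from transformers import pipeline, Conversation
    #   chatbot = pipeline('conversational' , model='microsoft/DialoGPT-small' )
    #   conversation = Conversation('Hi, how are you?' )
    #   conversation = chatbot(conversation )         # preprocess -> generate -> postprocess
    #   print(conversation.generated_responses[-1] )  # latest bot reply
    #   conversation.add_user_input('Tell me a joke.' )
    #   conversation = chatbot(conversation )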
| 154 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/pegasus-xsum""": 512,
}
class UpperCamelCase_ ( PreTrainedTokenizerFast ):
_a : List[Any] = VOCAB_FILES_NAMES
_a : int = PRETRAINED_VOCAB_FILES_MAP
_a : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Any = PegasusTokenizer
_a : Tuple = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=1_03 , **kwargs , ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    F"additional_special_tokens should be of type {type(list )}, but is"
                    F" {type(additional_special_tokens )}" )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F"<unk_{i}>" for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset )]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask( self , seq ):
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                F" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}" )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
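    # Editor's note (hedged sketch): with the default offset of 103, the reserved
    # extra special tokens built in __init__ are <mask_1> plus <unk_2>..<unk_102>.
    # This just replays that list-building logic with plain Python:
    #
    #   offset = 103
    #   extra = ['<mask_1>'] + [f'<unk_{i}>' for i in range(2 , offset )]
    #   print(len(extra ))  # -> 102 reserved additional special tokens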
| 364 |
def logical_left_shift( number ,shift_amount ):
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be non-negative integers' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift( number ,shift_amount ):
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be non-negative integers' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift( number ,shift_amount ):
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number ) ).strip('-' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
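# Editor's spot checks for the three shift functions above (hand-derived;
# treat as illustrative):
#   logical_left_shift(17, 2)      -> '0b1000100'
#   logical_right_shift(1675, 4)   -> '0b1101000'
#   arithmetic_right_shift(-17, 2) -> '0b111011'   (the sign bit is extended)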
| 364 | 1 |
def solution( n = 4000000 ):
    '''simple docstring'''
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(f'{solution() = }')
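# Editor's spot checks (hand-computed): the even Fibonacci terms up to 10 are
# 2 and 8; up to 34 they are 2, 8 and 34.
#   solution(10) -> 10
#   solution(34) -> 44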
| 709 |
from __future__ import annotations
import pandas as pd
def calculate_waitingtime( arrival_time , burst_time , no_of_processes ):
    '''simple docstring'''
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime( burst_time , no_of_processes , waiting_time ):
    '''simple docstring'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times( waiting_time , turn_around_time , no_of_processes ):
    '''simple docstring'''
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes ):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}" )
    print('''Average turn around time =''' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
        arrival_time[i] , burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
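# Editor's note: a classic worked example for the SRTF routines above (waiting
# times derived by hand; treat as illustrative):
#   arrival_time = [0, 1, 2, 3], burst_time = [8, 4, 9, 5]
#   -> waiting_time = [9, 0, 15, 2], average waiting time = 6.5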
| 563 | 0 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type , format_type: Optional[str] , aliases: Optional[List[str]] = None , ):
"""simple docstring"""
    aliases = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
    _FORMAT_TYPES[format_type] = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
            _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error: Exception , format_type: Optional[str] , aliases: Optional[List[str]] = None ):
"""simple docstring"""
    aliases = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str] ):
"""simple docstring"""
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def get_formatter(format_type: Optional[str] , **format_kwargs ):
    """simple docstring"""
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
            F'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'' )
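# Hedged usage sketch (editor's addition): resolving a formatter by name or
# alias through the registry above; which branches work depends on the optional
# backends installed in the environment.
#
#   fmt = get_formatter('np' )      # alias -> NumpyFormatter()
#   fmt = get_formatter(None )      # plain PythonFormatter()
#   fmt = get_formatter('pt' )      # TorchFormatter() if torch is installed,
#                                   # otherwise raises the registered ValueError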
| 556 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class _a ( PreTrainedModel ):
A = CLIPConfig
A = ['''CLIPEncoderLayer''']
def __init__(self, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
super().__init__(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = CLIPVisionModelWithProjection(config.vision_config )
UpperCAmelCase_: Tuple = nn.Linear(config.vision_config.projection_dim, 1 )
UpperCAmelCase_: Tuple = nn.Linear(config.vision_config.projection_dim, 1 )
@torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5 ) -> Tuple:
        image_embeds = self.vision_model(clip_input )[0]
        nsfw_detected = self.p_head(image_embeds )
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
        if any(nsfw_detected ):
            logger.warning(
                """Potential NSFW content was detected in one or more images. A black image will be returned instead."""
                """ Try again with a different prompt and/or seed.""" )
        for idx, nsfw_detected_ in enumerate(nsfw_detected ):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape )
        watermark_detected = self.w_head(image_embeds )
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
        if any(watermark_detected ):
            logger.warning(
                """Potential watermarked content was detected in one or more images. A black image will be returned instead."""
                """ Try again with a different prompt and/or seed.""" )
        for idx, watermark_detected_ in enumerate(watermark_detected ):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape )
        return images, nsfw_detected, watermark_detected
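    # Hedged usage sketch (editor's addition; shapes are assumptions):
    #   clip_input: CLIP pixel tensor of shape (batch, 3, H, W)
    #   images:     decoded images, one array per batch element
    #   images, nsfw, watermark = checker(clip_input , images , p_threshold=0.5 , w_threshold=0.5 )
    # Flagged images are replaced in place with black (all-zero) arrays.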
| 556 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class a ( TokenizerTesterMixin , unittest.TestCase ):
_lowercase = BlenderbotSmallTokenizer
_lowercase = False
    def setUp( self ):
'''simple docstring'''
super().setUp()
_UpperCAmelCase : Union[str, Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
_UpperCAmelCase : Any = dict(zip(A_ , range(len(A_ ) ) ) )
_UpperCAmelCase : Any = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
_UpperCAmelCase : str = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
_UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(A_ ) )
def _UpperCAmelCase ( self , **A_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **A_ )
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = "adapt act apte"
_UpperCAmelCase : Dict = "adapt act apte"
return input_text, output_text
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCAmelCase : Tuple = "adapt act apte"
_UpperCAmelCase : str = ["adapt", "act", "ap@@", "te"]
_UpperCAmelCase : Optional[Any] = tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
_UpperCAmelCase : Union[str, Any] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_UpperCAmelCase : int = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1384]
_UpperCAmelCase : Any = "I am a small frog."
_UpperCAmelCase : List[Any] = tok([src_text] , padding=A_ , truncation=A_ )["input_ids"]
_UpperCAmelCase : Optional[int] = tok.batch_decode(A_ , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
_UpperCAmelCase : List[Any] = "I am a small frog ."
_UpperCAmelCase : str = "."
_UpperCAmelCase : str = tok(A_ )["input_ids"]
_UpperCAmelCase : str = tok(A_ )["input_ids"]
assert encoded[-1] == encoded_dot[0]
| 467 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def squared_euclidean_distance( a , b ):
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize( x , clusters ):
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
class a ( BaseImageProcessor ):
_lowercase = ["pixel_values"]
def __init__( self , A_ = None , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = True , A_ = True , **A_ , ):
'''simple docstring'''
super().__init__(**A_ )
_UpperCAmelCase : Optional[Any] = size if size is not None else {"height": 256, "width": 256}
_UpperCAmelCase : Optional[int] = get_size_dict(A_ )
_UpperCAmelCase : Union[str, Any] = np.array(A_ ) if clusters is not None else None
_UpperCAmelCase : int = do_resize
_UpperCAmelCase : Union[str, Any] = size
_UpperCAmelCase : Optional[Any] = resample
_UpperCAmelCase : str = do_normalize
_UpperCAmelCase : List[str] = do_color_quantize
def _UpperCAmelCase ( self , A_ , A_ , A_ = PILImageResampling.BILINEAR , A_ = None , **A_ , ):
'''simple docstring'''
_UpperCAmelCase : int = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
A_ , size=(size["height"], size["width"]) , resample=A_ , data_format=A_ , **A_ )
def _UpperCAmelCase ( self , A_ , A_ = None , ):
'''simple docstring'''
_UpperCAmelCase : Dict = rescale(image=A_ , scale=1 / 1_27.5 , data_format=A_ )
_UpperCAmelCase : List[Any] = image - 1
return image
def _UpperCAmelCase ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : Any = size if size is not None else self.size
_UpperCAmelCase : Dict = get_size_dict(A_ )
_UpperCAmelCase : List[Any] = resample if resample is not None else self.resample
_UpperCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : Optional[Any] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_UpperCAmelCase : Any = clusters if clusters is not None else self.clusters
_UpperCAmelCase : Optional[int] = np.array(A_ )
_UpperCAmelCase : List[str] = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
_UpperCAmelCase : List[str] = [to_numpy_array(A_ ) for image in images]
if do_resize:
_UpperCAmelCase : int = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_normalize:
_UpperCAmelCase : List[str] = [self.normalize(image=A_ ) for image in images]
if do_color_quantize:
_UpperCAmelCase : Tuple = [to_channel_dimension_format(A_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_UpperCAmelCase : List[str] = np.array(A_ )
_UpperCAmelCase : List[Any] = color_quantize(A_ , A_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_UpperCAmelCase : Any = images.shape[0]
_UpperCAmelCase : List[Any] = images.reshape(A_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_UpperCAmelCase : Union[str, Any] = list(A_ )
else:
_UpperCAmelCase : Optional[Any] = [to_channel_dimension_format(A_ , A_ ) for image in images]
_UpperCAmelCase : List[Any] = {"input_ids": images}
return BatchFeature(data=A_ , tensor_type=A_ )
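# Toy check of the quantization helpers defined at the top of this file
# (editor's addition; values chosen by hand, illustrative only):
#   palette = np.array([[0, 0, 0], [255, 255, 255]] )      # 2-color palette
#   pixels = np.array([[[10, 10, 10], [250, 250, 250]]] )  # 1x2 RGB "image"
#   color_quantize(pixels , palette )                      # -> array([0, 1])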
| 467 | 1 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__SCREAMING_SNAKE_CASE = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class __UpperCamelCase ( unittest.TestCase ):
    def one_complete_example( self , complete_file_name , parser_only , secondary_filename = None , special_strings = None ) -> Optional[Any]:
lowerCAmelCase :List[Any] = None
lowerCAmelCase :Optional[Any] = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
lowerCAmelCase :Tuple = os.path.abspath('examples' )
for item in os.listdir(SCREAMING_SNAKE_CASE_ ):
if item not in EXCLUDE_EXAMPLES:
lowerCAmelCase :str = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if os.path.isfile(SCREAMING_SNAKE_CASE_ ) and ".py" in item_path:
with self.subTest(
tested_script=SCREAMING_SNAKE_CASE_ , feature_script=SCREAMING_SNAKE_CASE_ , tested_section='main()' if parser_only else 'training_function()' , ):
lowerCAmelCase :Optional[int] = compare_against_test(
os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase :List[str] = '\n'.join(SCREAMING_SNAKE_CASE_ )
if special_strings is not None:
for string in special_strings:
lowerCAmelCase :Any = diff.replace(SCREAMING_SNAKE_CASE_ , '' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '' )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
self.one_complete_example('complete_nlp_example.py' , SCREAMING_SNAKE_CASE_ )
self.one_complete_example('complete_nlp_example.py' , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
lowerCAmelCase :List[Any] = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
lowerCAmelCase :Any = [
' ' * 16 + '{\n\n',
' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 20 + '"f1": eval_metric["f1"],\n\n',
' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '"epoch": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.one_complete_example('complete_cv_example.py' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class __UpperCamelCase ( TempDirTestCase ):
lowercase_ : str = False
@classmethod
def UpperCAmelCase__ ( cls : str ) -> List[str]:
super().setUpClass()
lowerCAmelCase :int = tempfile.mkdtemp()
lowerCAmelCase :Optional[int] = os.path.join(cls._tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
lowerCAmelCase :Dict = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] ) -> Dict:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
lowerCAmelCase :List[Any] = f"""\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n """.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def UpperCAmelCase__ ( self : Any ) -> Tuple:
lowerCAmelCase :List[str] = f"""\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n """.split()
lowerCAmelCase :Tuple = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def UpperCAmelCase__ ( self : str ) -> Tuple:
lowerCAmelCase :Any = f"""\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n """.split()
lowerCAmelCase :Optional[Any] = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE_ )
self.assertNotIn('epoch 0:' , SCREAMING_SNAKE_CASE_ )
self.assertIn('epoch 1:' , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase :Dict = f"""\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n """.split()
lowerCAmelCase :Dict = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE_ )
if torch.cuda.is_available():
lowerCAmelCase :int = torch.cuda.device_count()
else:
lowerCAmelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' , SCREAMING_SNAKE_CASE_ )
self.assertIn('epoch 1:' , SCREAMING_SNAKE_CASE_ )
else:
self.assertIn('epoch 0:' , SCREAMING_SNAKE_CASE_ )
self.assertIn('epoch 1:' , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCAmelCase__ ( self : List[str] ) -> str:
lowerCAmelCase :Dict = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
lowerCAmelCase :List[Any] = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase :str = re.findall('({.+})' , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase :Any = [r for r in results if 'accuracy' in r][-1]
lowerCAmelCase :List[str] = ast.literal_eval(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(results['accuracy'] , 0.7_5 )
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
lowerCAmelCase :int = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def UpperCAmelCase__ ( self : Any ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
lowerCAmelCase :str = f"""\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n """.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , 'tracking' ) ) )
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
lowerCAmelCase :Optional[Any] = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def UpperCAmelCase__ ( self : str ) -> Dict:
lowerCAmelCase :int = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs ) | 553 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian (string_aa : bytes ) -> bytes:
    """simple docstring"""
    if len(string_aa ) != 3_2:
        raise ValueError('Input must be of length 32' )
    little_endian = B''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex (i : int ) -> bytes:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    hex_rep = format(i , '08x' )[-8:]
    little_endian_hex = B''
    for x in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * x : 2 * x + 2].encode('utf-8' )
    return little_endian_hex
def preprocess (message : bytes ) -> bytes:
    """simple docstring"""
    bit_string = B''
    for char in message:
        bit_string += format(char , '08b' ).encode('utf-8' )
    start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 5_1_2 != 4_4_8:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
    return bit_string
def get_block_words (bit_string : bytes ) -> Generator[list[int], None, None]:
    """simple docstring"""
    if len(bit_string ) % 5_1_2 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0 , len(bit_string ) , 5_1_2 ):
        block = bit_string[pos : pos + 5_1_2]
        block_words = []
        for i in range(0 , 5_1_2 , 3_2 ):
            block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
        yield block_words
def not_aa (i : int ) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(i , '032b' )
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa (a : int , b : int ) -> int:
    """simple docstring"""
    return (a + b) % 2**3_2
def left_rotate_aa (i : int , shift : int ) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def md5_me (message : bytes ) -> bytes:
    """simple docstring"""
    bit_string = preprocess(message )
    added_consts = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
    # Starting states
    aa = 0x6_7_4_5_2_3_0_1
    ba = 0xE_F_C_D_A_B_8_9
    ca = 0x9_8_B_A_D_C_F_E
    da = 0x1_0_3_2_5_4_7_6
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,  # round 1
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,  # round 2
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,  # round 3
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,  # round 4
    ]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__snake_case = d ^ (b & (c ^ d))
__snake_case = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__snake_case = c ^ (d & (b ^ c))
__snake_case = (5 * i + 1) % 1_6
elif i <= 4_7:
__snake_case = b ^ c ^ d
__snake_case = (3 * i + 5) % 1_6
else:
__snake_case = c ^ (b | not_aa(lowercase__ ))
__snake_case = (7 * i) % 1_6
__snake_case = (f + a + added_consts[i] + block_words[g]) % 2**3_2
__snake_case = d
__snake_case = c
__snake_case = b
__snake_case = sum_aa(lowercase__ , left_rotate_aa(lowercase__ , shift_amounts[i] ) )
# Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
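# Editor's known-answer checks for md5_me above (standard RFC 1321 test
# vectors; note the function returns the hex digest as bytes):
#   md5_me(b'' )    -> b'd41d8cd98f00b204e9800998ecf8427e'
#   md5_me(b'abc' ) -> b'900150983cd24fb0d6963f7d28e17f72'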
| 56 | 0 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCamelCase__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 226 |
from __future__ import annotations
def ohms_law(voltage: float , current: float , resistance: float ):
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
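# Editor's spot checks for ohms_law above (exactly one argument must be 0):
#   ohms_law(voltage=0 , current=2 , resistance=5 )  -> {'voltage': 10.0}
#   ohms_law(voltage=10 , current=0 , resistance=5 ) -> {'current': 2.0}
#   ohms_law(voltage=10 , current=2 , resistance=0 ) -> {'resistance': 5.0}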
| 226 | 1 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _A , _A=3 , _A=32 , _A=3 , _A=10 , _A=[10, 20, 30, 40] , _A=[1, 1, 2, 1] , _A=True , _A=True , _A="relu" , _A=3 , _A=None , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = embeddings_size
__SCREAMING_SNAKE_CASE = hidden_sizes
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = len(_A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _A ( self ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _A ( self , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = RegNetModel(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _A ( self , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = RegNetForImageClassification(_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : Tuple = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
UpperCamelCase__ : Union[str, Any] = (
{'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ : Tuple = False
UpperCamelCase__ : Optional[Any] = False
UpperCamelCase__ : Any = False
UpperCamelCase__ : int = False
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = RegNetModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_A , has_text_modality=_A )
def _A ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A ( self ):
'''simple docstring'''
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def _A ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def _A ( self ):
'''simple docstring'''
pass
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(_A )
__SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(config=_A )
for name, module in model.named_modules():
            if isinstance(_A , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def _A ( self ):
'''simple docstring'''
def check_hidden_states_output(_A , _A , _A ):
__SCREAMING_SNAKE_CASE = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(_A , _A ) )
__SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__SCREAMING_SNAKE_CASE = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__SCREAMING_SNAKE_CASE = layer_type
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(_A , _A , _A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def _A ( self ):
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = RegNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def __lowercase ( ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _A ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_A )
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=_A , return_tensors='pt' ).to(_A )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**_A )
# verify the logits
__SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _A )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
| 148 |
# Algorithm for the pigeonhole sorting
def __lowercase ( a__ ) -> Tuple:
__SCREAMING_SNAKE_CASE = min(a__ ) # min() finds the minimum value
__SCREAMING_SNAKE_CASE = max(a__ ) # max() finds the maximum value
__SCREAMING_SNAKE_CASE = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
__SCREAMING_SNAKE_CASE = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(a__ , a__ ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
__SCREAMING_SNAKE_CASE = 0
for count in range(a__ ):
while holes[count] > 0:
holes[count] -= 1
__SCREAMING_SNAKE_CASE = count + min_val
i += 1
def __lowercase ( ) -> List[str]:
__SCREAMING_SNAKE_CASE = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(a__ )
print('Sorted order is:' , ' '.join(a__ ) )
if __name__ == "__main__":
main()
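# Editor's note: expected output of main() above for the sample list
# [8, 3, 2, 7, 4, 6, 8]:
#   Sorted order is: 2 3 4 6 7 8 8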
| 148 | 1 |
import argparse
import os
import re
_lowerCamelCase = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent( line ):
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks( code , indent_level="" , start_prompt=None , end_prompt=None ):
    index = 0
    lines = code.split('''\n''' )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ['''\n'''.join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
                current_block.append(lines[index] )
                blocks.append('''\n'''.join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('''\n'''.join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append('''\n'''.join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append('''\n'''.join(lines[index:] ) )
    return blocks
def ignore_underscore( key ):
def _inner(lowercase_ ):
return key(lowercase_ ).lower().replace('''_''' , '''''' )
return _inner
def sort_objects( objects , key=None ):
    # If no key is provided, we use a noop.
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import( import_statement ):
    # This inner function sorts imports between [ ].
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F"[{imports}]"
        keys = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(keys )] ) + "]"
_lowerCamelCase : Tuple = import_statement.split('''\n''' )
if len(lowercase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCamelCase : Optional[int] = 2 if lines[1].strip() == '''[''' else 1
_lowerCamelCase : Optional[Any] = [(i, _re_strip_line.search(lowercase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCamelCase : Union[str, Any] = sort_objects(lowercase_ , key=lambda lowercase_ : x[1] )
_lowerCamelCase : int = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowercase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCamelCase : Dict = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCamelCase : Dict = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCamelCase : Union[str, Any] = keys[:-1]
_lowerCamelCase : Optional[Any] = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowercase_ )] )
return "\n".join(lowercase_ )
else:
# Finally we have to deal with imports fitting on one line
_lowerCamelCase : str = _re_bracket_content.sub(_replace , lowercase_ )
return import_statement
def sort_imports( file , check_only=True ):
with open(lowercase_ , '''r''' ) as f:
_lowerCamelCase : Union[str, Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCamelCase : Any = split_code_in_indented_blocks(
lowercase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowercase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCamelCase : Optional[int] = main_blocks[block_idx]
_lowerCamelCase : Optional[int] = block.split('''\n''' )
# Get to the start of the imports.
_lowerCamelCase : Union[str, Any] = 0
while line_idx < len(lowercase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCamelCase : Optional[int] = len(lowercase_ )
else:
line_idx += 1
if line_idx >= len(lowercase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCamelCase : Optional[int] = '''\n'''.join(block_lines[line_idx:-1] )
_lowerCamelCase : Dict = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_lowerCamelCase : Any = split_code_in_indented_blocks(lowercase_ , indent_level=lowercase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCamelCase : List[str] = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCamelCase : Optional[Any] = [(pattern.search(lowercase_ ).groups()[0] if pattern.search(lowercase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCamelCase : Optional[Any] = [(i, key) for i, key in enumerate(lowercase_ ) if key is not None]
_lowerCamelCase : int = [x[0] for x in sorted(lowercase_ , key=lambda lowercase_ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Dict = []
for i in range(len(lowercase_ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
_lowerCamelCase : Optional[int] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(lowercase_ )
count += 1
# And we put our main block back together with its first and last line.
_lowerCamelCase : List[str] = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(lowercase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowercase_ , '''w''' ) as f:
f.write('''\n'''.join(lowercase_ ) )
def sort_imports_in_all_inits( check_only=True ):
_lowerCamelCase : Any = []
for root, _, files in os.walk(lowercase_ ):
if "__init__.py" in files:
_lowerCamelCase : List[str] = sort_imports(os.path.join(lowercase_ , '''__init__.py''' ) , check_only=lowercase_ )
if result:
_lowerCamelCase : Optional[int] = [os.path.join(lowercase_ , '''__init__.py''' )]
if len(lowercase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowercase_ )} files, run `make style`.""" )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_lowerCamelCase = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
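# Editor's illustration of the ordering rule implemented by sort_objects above
# (constants first, then classes, then functions; underscores are ignored when
# comparing):
#   sort_objects(['foo', 'Bar', 'CONST'] ) -> ['CONST', 'Bar', 'foo']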
| 613 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __A ( TokenizerTesterMixin ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = BertJapaneseTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = True
    def setUp( self ):
"""simple docstring"""
super().setUp()
_lowerCamelCase : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
_lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], )

    def test_mecab_tokenizer_lowercase(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"], )

    def test_mecab_tokenizer_with_option(self):
        try:
            # Flags inferred from the expected output below, which keeps "iPhone"
            # capitalized and the ideographic space un-normalized.
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic")
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"], )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"], )
    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lowercase(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "], )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], )
    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], )

    @require_jumanpp
    def test_jumanpp_tokenizer_lowercase(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"], )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"), ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"], )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.'''))
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.'''))
| 613 | 1 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
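# Usage sketch (illustrative): `1` marks a blocked cell, and the search counts
# the simple paths from (0, 0) to the bottom-right corner.
#   depth_first_search([[0, 0], [1, 0]], 0, 0, set())  # -> 1 (right, then down)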
if __name__ == "__main__":
import doctest
doctest.testmod()
| 454 |
import argparse
import struct
import unittest
class SHA256:
    """simple docstring"""

    def __init__(self, data: bytes) -> None:
        self.data = data
# Initialize hash values
        self.hashes = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
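    # Padding sketch (illustration): an 11-byte input such as b"Test String"
    # becomes 11 + 1 (the 0x80 marker) + 44 zero bytes + 8 length bytes = 64
    # bytes, i.e. exactly one 64-byte block, as SHA-256 requires.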
    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 454 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A stream of unknown length: after each yielded item iteration stops with
    # probability `p_stop`, capped at `max_length` items.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
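# Illustrative expectation (added note): with the default p_stop=0.01 the
# stream above runs for roughly 1 / 0.01 = 100 items on average before
# stopping, and never past max_length.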
class DataLoaderTester(unittest.TestCase):
'''simple docstring'''
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
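    # Sharding intuition (illustrative note, not part of the original test):
    # with 2 processes and batch_size=3 on range(24), BatchSamplerShard deals
    # whole batches out round-robin, so shard 0 yields [0, 1, 2], [6, 7, 8], ...
    # and shard 1 yields [3, 4, 5], [9, 10, 11], ...; when the dataset does not
    # divide evenly and even_batches=True, samples are recycled from the start
    # so both shards keep the same number of batches.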
    def test_batch_sampler_shards_with_no_splits(self):
# Check the shards when the dataset is a round multiple of total batch size.
_SCREAMING_SNAKE_CASE : Optional[int] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
_SCREAMING_SNAKE_CASE : str = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_SCREAMING_SNAKE_CASE : Optional[Any] = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
_SCREAMING_SNAKE_CASE : Any = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_SCREAMING_SNAKE_CASE : List[Any] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Any = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
_SCREAMING_SNAKE_CASE : Tuple = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
_SCREAMING_SNAKE_CASE : int = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Any = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
# Check the shards when the dataset is very small.
_SCREAMING_SNAKE_CASE : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Any = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
    def test_batch_sampler_shards_with_splits(self):
# Check the shards when the dataset is a round multiple of batch size.
_SCREAMING_SNAKE_CASE : List[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : int = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : str = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
_SCREAMING_SNAKE_CASE : Optional[Any] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Dict = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_SCREAMING_SNAKE_CASE : Tuple = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : str = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Dict = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
# Check the shards when the dataset is very small.
_SCREAMING_SNAKE_CASE : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : str = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
    def test_batch_sampler_shards_with_no_splits_no_even(self):
# Check the shards when the dataset is a round multiple of total batch size.
_SCREAMING_SNAKE_CASE : List[str] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_SCREAMING_SNAKE_CASE : Union[str, Any] = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_SCREAMING_SNAKE_CASE : str = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Dict = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
_SCREAMING_SNAKE_CASE : Any = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is very small.
_SCREAMING_SNAKE_CASE : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Any = [[[0, 1]], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : str = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
    def test_batch_sampler_shards_with_splits_no_even(self):
# Check the shards when the dataset is a round multiple of batch size.
_SCREAMING_SNAKE_CASE : List[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Dict = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
_SCREAMING_SNAKE_CASE : int = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : int = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_SCREAMING_SNAKE_CASE : Optional[int] = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : str = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Dict = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is very small.
_SCREAMING_SNAKE_CASE : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = [[[0, 1]], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Any = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
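    # Note (added): `reference += reference` above mirrors how
    # IterableDatasetShard recycles samples from the beginning of the stream
    # when drop_last=False, so every process yields the same number of batches.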
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3) | 707 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(
        self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size))  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device)

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False, )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1) | 381 | 0 |
"""simple docstring"""
class DisjointSet:
    """
    Disjoint-set (union-find) over sets with known sizes: tracks the size of
    the largest set under union by rank with path compression.
    """

    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        # Path compression
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
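# Usage sketch (illustrative): three sets of sizes 1, 2 and 3; merging the
# first two produces a set of size 3, so the tracked maximum stays 3.
#   ds = DisjointSet([1, 2, 3])
#   ds.merge(0, 1)   # -> True
#   ds.max_set       # -> 3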
| 465 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
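# Note (added): the transpose/view calls above fold the per-head trax
# attention weights into the single 2D matrix that the PyTorch layer stores;
# `set_param` then asserts the shapes line up exactly.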
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# Initialise PyTorch model
__lowerCAmelCase = ReformerConfig.from_json_file(_lowerCAmelCase )
print(f"""Building PyTorch model from configuration: {config}""" )
__lowerCAmelCase = ReformerModelWithLMHead(_lowerCAmelCase )
with open(_lowerCAmelCase , """rb""" ) as f:
__lowerCAmelCase = pickle.load(_lowerCAmelCase )["""weights"""]
set_model_weights_in_torch(_lowerCAmelCase , _lowerCAmelCase , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the trax model pickle (.pkl) checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 465 | 1 |
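# A minimal, de-obfuscated sketch of the `set_param` pattern used throughout the
# conversion code above: copy a converted weight (and optional bias) into a
# PyTorch layer with explicit shape checks. Names and shapes are illustrative,
# not taken from a real checkpoint.
import numpy as np
import torch
from torch import nn

def set_param_example(torch_layer: nn.Module, weight: torch.Tensor, bias=None) -> None:
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)  # replace the parameter in place
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)

layer = nn.Linear(4, 3)
set_param_example(layer, torch.tensor(np.ones((3, 4), dtype=np.float32)))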
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __magic_name__ ( nn.Module ):
def __init__( self ):
"""simple docstring"""
super().__init__()
_lowerCAmelCase = nn.Linear(3 , 4 )
_lowerCAmelCase = nn.BatchNormad(4 )
_lowerCAmelCase = nn.Linear(4 , 5 )
def _lowerCamelCase ( self , __magic_name__ ):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(__magic_name__ ) ) )
class __magic_name__ ( _UpperCamelCase ):
def _lowerCamelCase ( self , __magic_name__ , *__magic_name__ , **__magic_name__ ):
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class __magic_name__ ( _UpperCamelCase ):
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ ):
"""simple docstring"""
return output + 1
class __magic_name__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ModelForTest()
_lowerCAmelCase = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
self.assertEqual(test_model._hf_hook , __magic_name__ )
self.assertTrue(hasattr(__magic_name__ , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , '_hf_hook' ) )
self.assertFalse(hasattr(__magic_name__ , '_old_forward' ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ModelForTest()
_lowerCAmelCase = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__magic_name__ , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , '_hf_hook' ) )
self.assertFalse(hasattr(__magic_name__ , '_old_forward' ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ModelForTest()
_lowerCAmelCase = torch.randn(2 , 3 )
_lowerCAmelCase = test_model(x + 1 )
_lowerCAmelCase = test_model(x + 2 )
_lowerCAmelCase = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
_lowerCAmelCase = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-5 ) )
# Attaching a hook to a module that already has one replaces it; hooks do not chain
_lowerCAmelCase = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
_lowerCAmelCase = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_lowerCAmelCase = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
_lowerCAmelCase = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1e-5 )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ModelForTest()
_lowerCAmelCase = torch.randn(2 , 3 )
_lowerCAmelCase = test_model(__magic_name__ )
_lowerCAmelCase = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
_lowerCAmelCase = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1e-5 ) )
# Attaching a hook to a module that already has one replaces it; hooks do not chain
_lowerCAmelCase = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
_lowerCAmelCase = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_lowerCAmelCase = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
_lowerCAmelCase = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , output + 2 , atol=1e-5 )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ModelForTest()
_lowerCAmelCase = torch.randn(2 , 3 )
_lowerCAmelCase = test_model(__magic_name__ )
_lowerCAmelCase = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
_lowerCAmelCase = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_lowerCAmelCase = True
_lowerCAmelCase = test_model(__magic_name__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_lowerCAmelCase = torch.randn(2 , 3 )
_lowerCAmelCase = model(__magic_name__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
_lowerCAmelCase = torch.randn(2 , 3 ).to(0 )
_lowerCAmelCase = model(__magic_name__ )
self.assertEqual(output.device , torch.device(0 ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_lowerCAmelCase = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCAmelCase = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
_lowerCAmelCase = torch.randn(2 , 3 )
_lowerCAmelCase = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
_lowerCAmelCase = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_lowerCAmelCase = torch.randn(2 , 3 )
_lowerCAmelCase = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_lowerCAmelCase = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCAmelCase = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
_lowerCAmelCase = torch.randn(2 , 3 )
_lowerCAmelCase = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_lowerCAmelCase = torch.randn(2 , 3 )
_lowerCAmelCase = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_lowerCAmelCase = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCAmelCase = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
_lowerCAmelCase = torch.randn(2 , 3 )
_lowerCAmelCase = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_lowerCAmelCase = torch.randn(2 , 3 )
_lowerCAmelCase = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
| 309 |
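# A small runnable sketch of the hook mechanics the tests above exercise: a
# `ModelHook` whose pre-forward shifts the first input by one, attached and
# detached with the same accelerate helpers. The plain Linear module is chosen
# only for illustration.
import torch
from torch import nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class AddOnePreHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # Runs before the wrapped forward; returns the modified (args, kwargs).
        return (args[0] + 1,) + args[1:], kwargs

model = nn.Linear(3, 3)
x = torch.randn(2, 3)
expected = model(x + 1)
add_hook_to_module(model, AddOnePreHook())
assert torch.allclose(model(x), expected, atol=1e-5)
remove_hook_from_module(model)  # restores the original forward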
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None ):
"""simple docstring"""
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
_lowerCAmelCase = nn.Parameter(__lowerCamelCase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
_lowerCAmelCase = nn.Parameter(__lowerCamelCase )
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
# set torch weights for 1-to-1 comparison
_lowerCAmelCase = np.asarray(weights[0] )
_lowerCAmelCase = np.asarray(weights[1] )
_lowerCAmelCase = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key, torch.tensor(__lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, __lowerCamelCase ), )
set_param(
torch_layer.self_attention.value, torch.tensor(__lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, __lowerCamelCase ), )
set_param(
torch_layer.output.dense, torch.tensor(__lowerCamelCase ).view(-1, __lowerCamelCase ).contiguous().transpose(0, 1 ), )
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
# set torch weights for 1-to-1 comparison
_lowerCAmelCase = np.asarray(weights[0] )
_lowerCAmelCase = np.asarray(weights[1] )
_lowerCAmelCase = np.asarray(weights[2] )
_lowerCAmelCase = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query, torch.tensor(__lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, __lowerCamelCase ), )
set_param(
torch_layer.self_attention.key, torch.tensor(__lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, __lowerCamelCase ), )
set_param(
torch_layer.self_attention.value, torch.tensor(__lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, __lowerCamelCase ), )
set_param(
torch_layer.output.dense, torch.tensor(__lowerCamelCase ).view(-1, __lowerCamelCase ).contiguous().transpose(0, 1 ), )
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
# layernorm 1
_lowerCAmelCase = weights[0][0][0]
_lowerCAmelCase = np.asarray(layer_norm_a[0] )
_lowerCAmelCase = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm, torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase ), )
# lsh weights + output
_lowerCAmelCase = weights[0][1]
if len(__lowerCamelCase ) < 4:
set_layer_weights_in_torch_lsh(__lowerCamelCase, torch_block.attention, __lowerCamelCase )
else:
set_layer_weights_in_torch_local(__lowerCamelCase, torch_block.attention, __lowerCamelCase )
# intermediate weights
_lowerCAmelCase = weights[2][0][1][2]
# Chunked Feed Forward
if len(__lowerCamelCase ) == 4:
_lowerCAmelCase = intermediate_weights[2]
# layernorm 2
_lowerCAmelCase = np.asarray(intermediate_weights[0][0] )
_lowerCAmelCase = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm, torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase ), )
# intermediate dense
_lowerCAmelCase = np.asarray(intermediate_weights[1][0] )
_lowerCAmelCase = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense, torch.tensor(__lowerCamelCase ).transpose(0, 1 ).contiguous(), torch.tensor(__lowerCamelCase ), )
# intermediate out
_lowerCAmelCase = np.asarray(intermediate_weights[4][0] )
_lowerCAmelCase = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense, torch.tensor(__lowerCamelCase ).transpose(0, 1 ).contiguous(), torch.tensor(__lowerCamelCase ), )
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
# reformer model
_lowerCAmelCase = torch_model.reformer
# word embeds
_lowerCAmelCase = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings, torch.tensor(__lowerCamelCase ), )
if isinstance(weights[3], __lowerCamelCase ):
_lowerCAmelCase = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_lowerCAmelCase = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'''{position_embeddings[emb_idx]} emb does not match'''
_lowerCAmelCase = nn.Parameter(torch.tensor(__lowerCamelCase ) )
_lowerCAmelCase = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__lowerCamelCase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_lowerCAmelCase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# output layer norm
_lowerCAmelCase = np.asarray(weights[7][0] )
_lowerCAmelCase = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm, torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase ), )
# output embeddings
_lowerCAmelCase = np.asarray(weights[9][0] )
_lowerCAmelCase = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder, torch.tensor(__lowerCamelCase ).transpose(0, 1 ).contiguous(), torch.tensor(__lowerCamelCase ), )
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
# Initialise PyTorch model
_lowerCAmelCase = ReformerConfig.from_json_file(__lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowerCAmelCase = ReformerModelWithLMHead(__lowerCamelCase )
with open(__lowerCamelCase, 'rb' ) as f:
_lowerCAmelCase = pickle.load(__lowerCamelCase )['weights']
set_model_weights_in_torch(__lowerCamelCase, __lowerCamelCase, config.hidden_size )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), __lowerCamelCase )
if __name__ == "__main__":
a__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a__ : int = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 309 | 1 |
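# A toy version of the load/convert/save flow in the entry point above, with an
# `nn.Linear` standing in for the Reformer model; the "weights" key mirrors how
# the trax pickle is read. File names are placeholders.
import pickle
import numpy as np
import torch
from torch import nn

model = nn.Linear(4, 4)
with open("toy_trax.pkl", "wb") as f:
    pickle.dump({"weights": [np.eye(4, dtype=np.float32)]}, f)
with open("toy_trax.pkl", "rb") as f:
    weights = pickle.load(f)["weights"]
model.weight = nn.Parameter(torch.tensor(weights[0]))  # convert one tensor
torch.save(model.state_dict(), "toy_pytorch_model.bin")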
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase__ : List[Any] = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
UpperCamelCase__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105 |
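# The `_LazyModule` above defers every import until first attribute access. A
# hand-rolled sketch of that idea (not the real implementation):
import importlib

class LazyAttr:
    def __init__(self, module_name: str, attr: str):
        self._module_name, self._attr = module_name, attr

    def resolve(self):
        # The heavy import happens only here, on first use.
        return getattr(importlib.import_module(self._module_name), self._attr)

print(LazyAttr("collections", "OrderedDict").resolve())  # triggers the import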
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
UpperCamelCase__ : Union[str, Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase__ : Dict = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase__ : Dict = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
UpperCamelCase__ : Any = '''▁'''
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[str] = VOCAB_FILES_NAMES
__a : int = PRETRAINED_VOCAB_FILES_MAP
__a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : List[Any] = ["input_ids", "attention_mask"]
__a : Dict = BarthezTokenizer
def __init__( self ,snake_case__=None ,snake_case__=None ,snake_case__="<s>" ,snake_case__="</s>" ,snake_case__="</s>" ,snake_case__="<s>" ,snake_case__="<unk>" ,snake_case__="<pad>" ,snake_case__="<mask>" ,**snake_case__ ,):
# The mask token behaves like a normal word, i.e. it includes the space before it
SCREAMING_SNAKE_CASE_ : str = AddedToken(snake_case__ ,lstrip=snake_case__ ,rstrip=snake_case__ ) if isinstance(snake_case__ ,snake_case__ ) else mask_token
super().__init__(
snake_case__ ,tokenizer_file=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,unk_token=snake_case__ ,sep_token=snake_case__ ,cls_token=snake_case__ ,pad_token=snake_case__ ,mask_token=snake_case__ ,**snake_case__ ,)
SCREAMING_SNAKE_CASE_ : List[Any] = vocab_file
SCREAMING_SNAKE_CASE_ : int = False if not self.vocab_file else True
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file ,snake_case__ )
return (out_vocab_file,)
| 105 | 1 |
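# The special-token layout implemented above, restated as a standalone function
# for clarity; the id values 0 (<s>) and 2 (</s>) are illustrative.
def build_with_special_tokens(ids_a, ids_b=None, cls_id=0, sep_id=2):
    # single sequence: <s> A </s>; pair: <s> A </s></s> B </s>
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]

assert build_with_special_tokens([10, 11]) == [0, 10, 11, 2]
assert build_with_special_tokens([10], [20]) == [0, 10, 2, 2, 20, 2]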
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
UpperCAmelCase_ : Tuple = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
UpperCAmelCase_ : Dict = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
UpperCAmelCase_ : int = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> Any:
if rouge_types is None:
a_ : Optional[int] = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
a_ : List[Any] = rouge_scorer.RougeScorer(rouge_types=SCREAMING_SNAKE_CASE__ , use_stemmer=SCREAMING_SNAKE_CASE__ )
if use_aggregator:
a_ : int = scoring.BootstrapAggregator()
else:
a_ : Optional[int] = []
for ref, pred in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
a_ : Dict = scorer.score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if use_aggregator:
aggregator.add_scores(SCREAMING_SNAKE_CASE__ )
else:
scores.append(SCREAMING_SNAKE_CASE__ )
if use_aggregator:
a_ : List[Any] = aggregator.aggregate()
else:
a_ : Any = {}
for key in scores[0]:
a_ : Any = [score[key] for score in scores]
return result
| 443 |
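# Driving `rouge_score` directly, the way the `_compute` method above does on
# its aggregated path. Identical prediction/reference pairs score 1.0.
from rouge_score import rouge_scorer, scoring

scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1", "rougeL"], use_stemmer=True)
aggregator = scoring.BootstrapAggregator()
for ref, pred in zip(["hello there"], ["hello there"]):
    aggregator.add_scores(scorer.score(ref, pred))
result = aggregator.aggregate()
print(result["rouge1"].mid.fmeasure)  # 1.0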
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] , __A : Tuple ) -> Any:
"""simple docstring"""
assert isinstance(__A , __A )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : Any , __A : Tuple ) -> Tuple:
"""simple docstring"""
a_ : Dict = tmp_path / 'cache'
a_ : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a_ : Union[str, Any] = ParquetDatasetReader(__A , cache_dir=__A , keep_in_memory=__A ).read()
_check_parquet_dataset(__A , __A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def SCREAMING_SNAKE_CASE_ ( __A : str , __A : Dict , __A : Union[str, Any] ) -> Dict:
"""simple docstring"""
a_ : Tuple = tmp_path / 'cache'
a_ : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a_ : List[str] = features.copy() if features else default_expected_features
a_ : int = (
Features({feature: Value(__A ) for feature, dtype in features.items()} ) if features is not None else None
)
a_ : List[str] = ParquetDatasetReader(__A , features=__A , cache_dir=__A ).read()
_check_parquet_dataset(__A , __A )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : List[str] , __A : int ) -> List[str]:
"""simple docstring"""
a_ : int = tmp_path / 'cache'
a_ : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a_ : List[str] = ParquetDatasetReader(__A , cache_dir=__A , split=__A ).read()
_check_parquet_dataset(__A , __A )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : str , __A : Optional[int] ) -> Any:
"""simple docstring"""
if issubclass(__A , __A ):
a_ : Tuple = parquet_path
elif issubclass(__A , __A ):
a_ : str = [parquet_path]
a_ : int = tmp_path / 'cache'
a_ : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a_ : str = ParquetDatasetReader(__A , cache_dir=__A ).read()
_check_parquet_dataset(__A , __A )
def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : Dict , __A : Optional[Any]=("train",) ) -> Optional[int]:
"""simple docstring"""
assert isinstance(__A , __A )
for split in splits:
a_ : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] , __A : str , __A : str ) -> Union[str, Any]:
"""simple docstring"""
a_ : Union[str, Any] = tmp_path / 'cache'
a_ : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a_ : Tuple = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=__A , keep_in_memory=__A ).read()
_check_parquet_datasetdict(__A , __A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : Optional[int] , __A : Tuple ) -> List[Any]:
"""simple docstring"""
a_ : Optional[Any] = tmp_path / 'cache'
a_ : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a_ : Optional[int] = features.copy() if features else default_expected_features
a_ : Tuple = (
Features({feature: Value(__A ) for feature, dtype in features.items()} ) if features is not None else None
)
a_ : Optional[Any] = ParquetDatasetReader({'train': parquet_path} , features=__A , cache_dir=__A ).read()
_check_parquet_datasetdict(__A , __A )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def SCREAMING_SNAKE_CASE_ ( __A : int , __A : List[Any] , __A : Optional[Any] ) -> Any:
"""simple docstring"""
if split:
a_ : Any = {split: parquet_path}
else:
a_ : Dict = 'train'
a_ : int = {'train': parquet_path, 'test': parquet_path}
a_ : int = tmp_path / 'cache'
a_ : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a_ : Tuple = ParquetDatasetReader(__A , cache_dir=__A ).read()
_check_parquet_datasetdict(__A , __A , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : Optional[int] ) -> List[Any]:
"""simple docstring"""
a_ : List[str] = ParquetDatasetWriter(__A , tmp_path / 'foo.parquet' )
assert writer.write() > 0
a_ : List[str] = pq.ParquetFile(tmp_path / 'foo.parquet' )
a_ : Dict = pf.read()
assert dataset.data.table == output_table
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : Any ) -> Optional[int]:
"""simple docstring"""
a_ : str = str(shared_datadir / 'test_image_rgb.jpg' )
a_ : List[Any] = {'image': [image_path]}
a_ : int = Features({'image': Image()} )
a_ : List[Any] = Dataset.from_dict(__A , features=__A )
a_ : str = ParquetDatasetWriter(__A , tmp_path / 'foo.parquet' )
assert writer.write() > 0
a_ : str = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
a_ : str = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=__A ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def SCREAMING_SNAKE_CASE_ ( __A : int , __A : List[str] ) -> List[str]:
"""simple docstring"""
assert get_writer_batch_size(__A ) == expected
| 443 | 1 |
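# A minimal round trip through the writer/reader pair the tests above cover;
# the file name is a placeholder and no features/cache options are set.
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
assert ParquetDatasetWriter(ds, "roundtrip.parquet").write() > 0
reloaded = ParquetDatasetReader("roundtrip.parquet").read()
assert reloaded.column_names == ["col_1", "col_2"]
assert reloaded.num_rows == 2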
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__snake_case : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 660 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase_ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("xlm-roberta-base" )
UpperCAmelCase_ = "The dog is cute and lives in the garden house"
UpperCAmelCase_ = jnp.array([tokenizer.encode(UpperCamelCase__ )] )
UpperCAmelCase_ = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
UpperCAmelCase_ = model(UpperCamelCase__ )["last_hidden_state"]
self.assertEqual(output.shape , UpperCamelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase__ , atol=1e-3 ) )
| 660 | 1 |
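# The integration test above compares only a slice of the last hidden dimension
# against hard-coded reference values. The same pattern, with dummy numbers in
# place of real model outputs:
import jax.numpy as jnp

output = jnp.full((1, 12, 768), 0.5)      # stands in for last_hidden_state
expected_slice = jnp.full((1, 12), 0.5)   # stands in for recorded values
assert output.shape == (1, 12, 768)
assert jnp.allclose(output[:, :, -1], expected_slice, atol=1e-3)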
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
_snake_case = 1
@register_to_config
def __init__( self , SCREAMING_SNAKE_CASE_=2000 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=20 , SCREAMING_SNAKE_CASE_=1E-3 )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None )-> Optional[Any]:
'''simple docstring'''
__UpperCamelCase = torch.linspace(1 , self.config.sampling_eps , __lowercase , device=__lowercase )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None )-> Dict:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__UpperCamelCase = (
-0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__UpperCamelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__UpperCamelCase = std.flatten()
while len(std.shape ) < len(score.shape ):
__UpperCamelCase = std.unsqueeze(-1 )
__UpperCamelCase = -score / std
# compute
__UpperCamelCase = -1.0 / len(self.timesteps )
__UpperCamelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__UpperCamelCase = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__UpperCamelCase = beta_t.unsqueeze(-1 )
__UpperCamelCase = -0.5 * beta_t * x
__UpperCamelCase = torch.sqrt(__lowercase )
__UpperCamelCase = drift - diffusion**2 * score
__UpperCamelCase = x + drift * dt
# add noise
__UpperCamelCase = randn_tensor(x.shape , layout=x.layout , generator=__lowercase , device=x.device , dtype=x.dtype )
__UpperCamelCase = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self )-> Union[str, Any]:
'''simple docstring'''
return self.config.num_train_timesteps
| 715 |
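# Numeric spot-check of the marginal statistics computed in the step method
# above, using the scheduler's default beta range (0.1 to 20):
import math

beta_min, beta_max, t = 0.1, 20.0, 0.5
log_mean_coeff = -0.25 * t**2 * (beta_max - beta_min) - 0.5 * t * beta_min
std = math.sqrt(1.0 - math.exp(2.0 * log_mean_coeff))
print(round(std, 4))  # ~0.9597: noise dominates by mid-trajectory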
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def A_ ( snake_case : str , snake_case : str , **snake_case : List[str] ) -> Dict:
'''simple docstring'''
__UpperCamelCase = AutoConfig.from_pretrained(snake_case , **snake_case )
__UpperCamelCase = AutoModelForSeqaSeqLM.from_config(snake_case )
model.save_pretrained(snake_case )
AutoTokenizer.from_pretrained(snake_case ).save_pretrained(snake_case )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 451 | 0 |
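# What the helper above does, spelled out: build a randomly initialized seq2seq
# model from an existing architecture's config and save it. The checkpoint name
# and output directory are placeholders.
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer

config = AutoConfig.from_pretrained("t5-small")    # architecture only
model = AutoModelForSeq2SeqLM.from_config(config)  # fresh random weights
model.save_pretrained("t5-small-random")
AutoTokenizer.from_pretrained("t5-small").save_pretrained("t5-small-random")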
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 674 |
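# The guard pattern repeated above, reduced to its essentials: probe for an
# optional backend once and only then extend the import table. The probe is a
# stand-in for the real `is_torch_available` utility.
_import_structure = {"configuration_xlnet": ["XLNetConfig"]}

def _torch_available() -> bool:
    try:
        import torch  # noqa: F401
        return True
    except ImportError:
        return False

if _torch_available():
    _import_structure["modeling_xlnet"] = ["XLNetModel"]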
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class snake_case ( __UpperCAmelCase ):
lowerCamelCase__ = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements that should be identified in the segmentation mask. The tool returns the mask.'''
)
lowerCamelCase__ = '''CIDAS/clipseg-rd64-refined'''
lowerCamelCase__ = '''image_segmenter'''
lowerCamelCase__ = CLIPSegForImageSegmentation
lowerCamelCase__ = ['''image''', '''text''']
lowerCamelCase__ = ['''image''']
def __init__( self :Dict , *_lowerCamelCase :Union[str, Any] , **_lowerCamelCase :Tuple ):
requires_backends(self , ['''vision'''] )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :Tuple , _lowerCamelCase :"Image" , _lowerCamelCase :str ):
return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] , _lowerCamelCase :Optional[int] ):
with torch.no_grad():
__SCREAMING_SNAKE_CASE : List[Any] = self.model(**_lowerCamelCase ).logits
return logits
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , _lowerCamelCase :Tuple ):
array = outputs.cpu().detach().numpy()
array[array <= 0] = 0
array[array > 0] = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
| 674 | 1 |
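# The decode step above binarizes the CLIPSeg logits and renders them as an
# 8-bit PIL mask. The same post-processing on random stand-in logits:
import numpy as np
from PIL import Image

logits = np.random.randn(64, 64).astype(np.float32)
logits[logits <= 0] = 0
logits[logits > 0] = 1
mask = Image.fromarray((logits * 255).astype(np.uint8))
print(mask.size)  # (64, 64)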
def a ( A__ = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE__ : str = int(A__ )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2
SCREAMING_SNAKE_CASE__ : Any = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = i
while n % i == 0:
SCREAMING_SNAKE_CASE__ : List[str] = n // i
i += 1
return int(A__ )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 250 |
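# A plain restatement of the trial-division idea above, with the classic check:
# 600851475143 = 71 * 839 * 1471 * 6857, so the largest prime factor is 6857.
def largest_prime_factor(n: int) -> int:
    i, largest = 2, 1
    while i * i <= n:
        while n % i == 0:
            largest, n = i, n // i
        i += 1
    return n if n > 1 else largest

assert largest_prime_factor(600_851_475_143) == 6_857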
def a ( A__ , A__ , A__ ) -> float:
'''simple docstring'''
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(A__ , A__ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
SCREAMING_SNAKE_CASE__ : List[Any] = rate_per_annum / 1_2
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
SCREAMING_SNAKE_CASE__ : Union[str, Any] = years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 250 | 1 |
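# Worked example of the amortization formula implemented above (figures are
# illustrative): payment = P * r * (1 + r)^n / ((1 + r)^n - 1).
principal, rate_per_annum, years_to_repay = 25_000, 0.12, 3
r = rate_per_annum / 12   # monthly rate
n = years_to_repay * 12   # number of monthly payments
payment = principal * r * (1 + r) ** n / ((1 + r) ** n - 1)
print(round(payment, 2))  # ~830.36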
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __a ( A__ : List[str] , A__ : str=0.9_9_9 , A__ : Tuple="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ : Optional[int] ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ : Tuple ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F"Unsupported alpha_transform_type: {alpha_transform_type}" )
SCREAMING_SNAKE_CASE = []
for i in range(A__ ):
SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ):
'''simple docstring'''
lowerCamelCase__ = [e.name for e in KarrasDiffusionSchedulers]
lowerCamelCase__ = 2
@register_to_config
def __init__( self : Tuple , __lowerCamelCase : int = 1000 , __lowerCamelCase : float = 0.00_085 , __lowerCamelCase : float = 0.012 , __lowerCamelCase : str = "linear" , __lowerCamelCase : Optional[Union[np.ndarray, List[float]]] = None , __lowerCamelCase : str = "epsilon" , __lowerCamelCase : str = "linspace" , __lowerCamelCase : int = 0 , ):
if trained_betas is not None:
SCREAMING_SNAKE_CASE = torch.tensor(__lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE = torch.linspace(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowerCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE = betas_for_alpha_bar(__lowerCamelCase )
else:
raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}" )
SCREAMING_SNAKE_CASE = 1.0 - self.betas
SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str]=None ):
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE = self.timesteps
SCREAMING_SNAKE_CASE = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
SCREAMING_SNAKE_CASE = 1 if len(__lowerCamelCase ) > 1 else 0
else:
SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
SCREAMING_SNAKE_CASE = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _snake_case ( self : str ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _snake_case ( self : List[Any] , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : Union[float, torch.FloatTensor] , ):
SCREAMING_SNAKE_CASE = self.index_for_timestep(__lowerCamelCase )
if self.state_in_first_order:
SCREAMING_SNAKE_CASE = self.sigmas[step_index]
else:
SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index]
SCREAMING_SNAKE_CASE = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Union[str, torch.device] = None , __lowerCamelCase : Optional[int] = None , ):
SCREAMING_SNAKE_CASE = num_inference_steps
SCREAMING_SNAKE_CASE = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE = np.linspace(0 , num_train_timesteps - 1 , __lowerCamelCase , dtype=__lowerCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_steps is a power of 3
SCREAMING_SNAKE_CASE = (np.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(__lowerCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE = (np.arange(__lowerCamelCase , 0 , -step_ratio )).round().copy().astype(__lowerCamelCase )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
SCREAMING_SNAKE_CASE = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
SCREAMING_SNAKE_CASE = torch.from_numpy(np.log(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = np.interp(__lowerCamelCase , np.arange(0 , len(__lowerCamelCase ) ) , __lowerCamelCase )
SCREAMING_SNAKE_CASE = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
SCREAMING_SNAKE_CASE = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase )
# interpolate sigmas
SCREAMING_SNAKE_CASE = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
SCREAMING_SNAKE_CASE = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
SCREAMING_SNAKE_CASE = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__lowerCamelCase ).startswith("mps" ):
# mps does not support float64
SCREAMING_SNAKE_CASE = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase )
# interpolate timesteps
SCREAMING_SNAKE_CASE = self.sigma_to_t(__lowerCamelCase ).to(__lowerCamelCase , dtype=timesteps.dtype )
SCREAMING_SNAKE_CASE = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
SCREAMING_SNAKE_CASE = torch.cat([timesteps[:1], interleaved_timesteps] )
SCREAMING_SNAKE_CASE = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE = defaultdict(__lowerCamelCase )
    def sigma_to_t( self , sigma ):
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0 , 1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape )
        return t
    @property
    def state_in_first_order( self ):
        return self.sample is None
    def step( self , model_output: Union[torch.FloatTensor, np.ndarray] , timestep: Union[float, torch.FloatTensor] , sample: Union[torch.FloatTensor, np.ndarray] , return_dict: bool = True , ):
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample" )
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self , original_samples: torch.FloatTensor , noise: torch.FloatTensor , timesteps: torch.FloatTensor , ):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__( self ):
        return self.config.num_train_timesteps
| 16 |
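A condensed sketch of the sigma pipeline the scheduler above builds in `__init__` and `set_timesteps`: betas from a scaled-linear schedule, cumulative alphas, Karras-style sigmas, then the lerp-in-log-space interpolation between adjacent sigmas. The helper name and defaults are illustrative only, not part of the diffusers API.

import torch

def make_interpolated_sigmas(n_train: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012):
    # "scaled_linear" schedule, mirroring the __init__ above
    betas = torch.linspace(beta_start**0.5, beta_end**0.5, n_train, dtype=torch.float32) ** 2
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
    # geometric mean of neighbouring sigmas: the same log-space lerp trick as set_timesteps
    sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
    return sigmas, sigmas_interpol

sigmas, sigmas_interpol = make_interpolated_sigmas()
print(sigmas.shape, sigmas_interpol.shape)  # torch.Size([1000]) torch.Size([1000])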
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence most TensorFlow C++ logging
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 124 | 0 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Apply the logistic sigmoid function element-wise.

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 |
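The sigmoid above overflows np.exp for inputs far below zero; a numerically stable variant (my addition, not part of the snippet) splits on the sign so exp is only called on non-positive values:

import numpy as np

def stable_sigmoid(vector: np.ndarray) -> np.ndarray:
    positive = vector >= 0
    z = np.exp(np.where(positive, -vector, vector))  # exponent is always <= 0
    return np.where(positive, 1 / (1 + z), z / (1 + z))

print(stable_sigmoid(np.array([-1000.0, 0.0, 1000.0])))  # approximately [0. 0.5 1.]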
def valid_coloring(neighbours: list[int] , colored_vertices: list[int] , color: int ) -> bool:
    # valid if no already-colored neighbour shares this color
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color(graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ) -> bool:
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]] , max_colors: int ) -> list[int]:
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
| 346 | 1 |
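A quick usage sketch for the m-coloring routine above (assuming the functions are in scope): a triangle plus one pendant vertex, colored with three colors.

# Adjacency matrix: vertices 0-1-2 form a triangle, vertex 3 touches only vertex 0.
graph = [
    [0, 1, 1, 1],
    [1, 0, 1, 0],
    [1, 1, 0, 0],
    [1, 0, 0, 0],
]
print(color(graph, max_colors=3))  # [0, 1, 2, 1]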
"""simple docstring"""
class a :
"""simple docstring"""
def __init__( self , snake_case_ ) -> Optional[Any]:
_UpperCAmelCase = n
_UpperCAmelCase = [None] * self.n
_UpperCAmelCase = 0 # index of the first element
_UpperCAmelCase = 0
_UpperCAmelCase = 0
def __len__( self ) -> int:
return self.size
def __A ( self ) -> bool:
return self.size == 0
def __A ( self ) -> Tuple:
return False if self.is_empty() else self.array[self.front]
def __A ( self , snake_case_ ) -> Dict:
if self.size >= self.n:
raise Exception("QUEUE IS FULL" )
_UpperCAmelCase = data
_UpperCAmelCase = (self.rear + 1) % self.n
self.size += 1
return self
def __A ( self ) -> Union[str, Any]:
if self.size == 0:
raise Exception("UNDERFLOW" )
_UpperCAmelCase = self.array[self.front]
_UpperCAmelCase = None
_UpperCAmelCase = (self.front + 1) % self.n
self.size -= 1
return temp
| 426 |
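Exercising the fixed-size queue above, including wrap-around of the rear pointer when a slot is freed and reused:

queue = CircularQueue(3)
queue.enqueue("a").enqueue("b").enqueue("c")  # enqueue returns self, so calls chain
print(len(queue), queue.first())   # 3 a
print(queue.dequeue())             # a
queue.enqueue("d")                 # rear wraps around to slot 0
print(queue.dequeue(), queue.dequeue(), queue.dequeue())  # b c d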
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig ):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
| 426 | 1 |
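The `_rope_scaling_validation` hook above only checks shape and ranges; here is the same contract as a standalone sketch (hypothetical function name, not a transformers API):

def validate_rope_scaling(rope_scaling):
    # mirrors the checks performed by the config class above
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {rope_scaling}")
    scaling_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {scaling_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently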
"""simple docstring"""
import sys
from pathlib import Path
__a : Dict = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]
def custom_name_func(func , param_num , param ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
    return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus ):
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_non_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=False , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=False , )
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_non_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=True , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=True , )
    def do_checks( self , output_dir ):
        pass
    def run_and_check( self , stage: str , model: str , eval_steps: int = 10 , distributed: bool = True , fp16: bool = True , quality_checks: bool = True , ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fp16=fp16 , )
        self.do_checks(output_dir )
        return output_dir
    def run_trainer( self , stage: str , model_name: str , eval_steps: int = 10 , num_train_epochs: int = 1 , distributed: bool = True , fp16: bool = True , ):
        output_dir = self.get_auto_remove_tmp_dir('./xxx' , after=False )
        args = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(num_train_epochs )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n '.split()
        if fp16:
            args.extend(['--fp16'] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
        script = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
        launcher = self.get_launcher(distributed )
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd , env=self.get_env() )
        return output_dir
    def get_launcher( self , distributed=False ):
        num_gpus = min(2 , get_gpu_count() ) if distributed else 1
        return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
| 701 |
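How the `name_func` hook used above shapes generated test names, as a self-contained sketch with the `parameterized` package (TestDemo and its parameter pairs are made up for illustration):

import unittest
from parameterized import parameterized

def name_with_all_args(func, param_num, param):
    # join every positional arg into the sub-test name, not just the first one
    return f"{func.__name__}_{parameterized.to_safe_name('_'.join(str(x) for x in param.args))}"

class TestDemo(unittest.TestCase):
    @parameterized.expand([("zero2", "base"), ("zero3", "robust")], name_func=name_with_all_args)
    def test_pair(self, stage, model):
        self.assertIn(stage, ("zero2", "zero3"))

# generates test_pair_zero2_base and test_pair_zero3_robust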
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        output_states = ()
        for resnet, attn in zip(self.resnets , self.attentions ):
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , temb , deterministic=True ):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , encoder_hidden_states , deterministic=True ):
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUpBlock2D(nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module ):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        return hidden_states
| 200 | 0 |
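The up-blocks above pop encoder activations off a tuple and concatenate them channel-wise before each resnet; the core move in plain jax.numpy (shapes are illustrative):

import jax.numpy as jnp

hidden_states = jnp.zeros((1, 8, 8, 320))            # NHWC, as Flax convolutions expect
res_hidden_states_tuple = (jnp.zeros((1, 8, 8, 320)),)

res_hidden_states = res_hidden_states_tuple[-1]      # pop the matching skip activation
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
print(hidden_states.shape)  # (1, 8, 8, 640)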
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin ):
    def __init__( self , feature_size: int , sampling_rate: int , padding_value: float , **kwargs ):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("padding_side" , "right" )
        self.return_attention_mask = kwargs.pop("return_attention_mask" , True )
        super().__init__(**kwargs )
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __snake_case : Union[bool, str, PaddingStrategy] = True , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(__snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__magic_name__: Union[str, Any] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
F' to this method that includes {self.model_input_names[0]}, but you provided'
F' {list(processed_features.keys() )}' )
__magic_name__: Any = processed_features[self.model_input_names[0]]
__magic_name__: Tuple = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__snake_case ) == 0:
if return_attention_mask:
__magic_name__: Optional[Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__magic_name__: Tuple = required_input[0]
if isinstance(__snake_case , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__magic_name__: str = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__snake_case ):
__magic_name__: Optional[int] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__snake_case ):
__magic_name__: List[str] = """tf"""
elif is_torch_tensor(__snake_case ):
__magic_name__: Any = """pt"""
elif isinstance(__snake_case , (int, float, list, tuple, np.ndarray) ):
__magic_name__: int = """np"""
else:
raise ValueError(
F'type of {first_element} unknown: {type(__snake_case )}. '
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__magic_name__: List[str] = to_numpy(__snake_case )
else:
__magic_name__: Any = [to_numpy(__snake_case ) for v in value]
# Convert padding_strategy in PaddingStrategy
__magic_name__: str = self._get_padding_strategies(padding=__snake_case , max_length=__snake_case )
__magic_name__: str = processed_features[self.model_input_names[0]]
__magic_name__: str = len(__snake_case )
if not all(len(__snake_case ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__magic_name__: List[Any] = []
for i in range(__snake_case ):
__magic_name__: List[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
__magic_name__: List[Any] = self._truncate(
__snake_case , max_length=__snake_case , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
truncated_inputs.append(__snake_case )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__magic_name__: Union[str, Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__magic_name__: Union[str, Any] = PaddingStrategy.MAX_LENGTH
__magic_name__: List[str] = {}
for i in range(__snake_case ):
# padding
__magic_name__: str = self._pad(
truncated_inputs[i] , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
for key, value in outputs.items():
if key not in batch_outputs:
__magic_name__: Optional[Any] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
return BatchFeature(__snake_case , tensor_type=__snake_case )
def lowerCamelCase__ ( self : Tuple , __snake_case : Union[Dict[str, np.ndarray], BatchFeature] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ) -> dict:
__magic_name__: List[str] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__magic_name__: List[Any] = len(__snake_case )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__: List[str] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__: str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__snake_case ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input ) , dtype=np.int32 )
if needs_to_be_padded:
__magic_name__: str = max_length - len(__snake_case )
if self.padding_side == "right":
if return_attention_mask:
__magic_name__: List[Any] = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__magic_name__: Union[str, Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__magic_name__: int = np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__magic_name__: Dict = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__magic_name__: Optional[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__magic_name__: int = np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Union[Dict[str, np.ndarray], BatchFeature] , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ) -> int:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__magic_name__: Dict = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__: Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__: Tuple = len(__snake_case ) > max_length
if needs_to_be_truncated:
__magic_name__: Any = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__magic_name__: List[Any] = processed_features["""attention_mask"""][:max_length]
return processed_features
def lowerCamelCase__ ( self : List[Any] , __snake_case : int=False , __snake_case : Tuple=None ) -> Optional[Any]:
# Get padding strategy
if padding is not False:
if padding is True:
__magic_name__: Optional[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__snake_case , __snake_case ):
__magic_name__: Tuple = PaddingStrategy(__snake_case )
elif isinstance(__snake_case , __snake_case ):
__magic_name__: Dict = padding
else:
__magic_name__: int = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
| 96 |
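The padding path above in miniature: right-pad a ragged batch of 1-D feature sequences to the longest length and build the matching attention mask (pure NumPy sketch, not the transformers API):

import numpy as np

batch = [np.array([1.0, 2.0, 3.0]), np.array([4.0])]
max_length = max(len(seq) for seq in batch)

padded, masks = [], []
for seq in batch:
    difference = max_length - len(seq)
    padded.append(np.pad(seq, (0, difference), "constant", constant_values=0.0))
    masks.append(np.pad(np.ones(len(seq), dtype=np.int32), (0, difference)))

print(np.stack(padded))  # [[1. 2. 3.] [4. 0. 0.]]
print(np.stack(masks))   # [[1 1 1] [1 0 0]]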
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__lowerCamelCase = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : float , **__snake_case : Optional[Any] ) -> str:
__magic_name__: Optional[Any] = feature_size
__magic_name__: List[Any] = sampling_rate
__magic_name__: Tuple = padding_value
__magic_name__: int = kwargs.pop("""padding_side""" , """right""" )
__magic_name__: Optional[Any] = kwargs.pop("""return_attention_mask""" , __snake_case )
super().__init__(**__snake_case )
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __snake_case : Union[bool, str, PaddingStrategy] = True , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(__snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__magic_name__: Union[str, Any] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
F' to this method that includes {self.model_input_names[0]}, but you provided'
F' {list(processed_features.keys() )}' )
__magic_name__: Any = processed_features[self.model_input_names[0]]
__magic_name__: Tuple = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__snake_case ) == 0:
if return_attention_mask:
__magic_name__: Optional[Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__magic_name__: Tuple = required_input[0]
if isinstance(__snake_case , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__magic_name__: str = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__snake_case ):
__magic_name__: Optional[int] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__snake_case ):
__magic_name__: List[str] = """tf"""
elif is_torch_tensor(__snake_case ):
__magic_name__: Any = """pt"""
elif isinstance(__snake_case , (int, float, list, tuple, np.ndarray) ):
__magic_name__: int = """np"""
else:
raise ValueError(
F'type of {first_element} unknown: {type(__snake_case )}. '
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__magic_name__: List[str] = to_numpy(__snake_case )
else:
__magic_name__: Any = [to_numpy(__snake_case ) for v in value]
# Convert padding_strategy in PaddingStrategy
__magic_name__: str = self._get_padding_strategies(padding=__snake_case , max_length=__snake_case )
__magic_name__: str = processed_features[self.model_input_names[0]]
__magic_name__: str = len(__snake_case )
if not all(len(__snake_case ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__magic_name__: List[Any] = []
for i in range(__snake_case ):
__magic_name__: List[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
__magic_name__: List[Any] = self._truncate(
__snake_case , max_length=__snake_case , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
truncated_inputs.append(__snake_case )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__magic_name__: Union[str, Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__magic_name__: Union[str, Any] = PaddingStrategy.MAX_LENGTH
__magic_name__: List[str] = {}
for i in range(__snake_case ):
# padding
__magic_name__: str = self._pad(
truncated_inputs[i] , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
for key, value in outputs.items():
if key not in batch_outputs:
__magic_name__: Optional[Any] = []
if value.dtype is np.dtype(np.floataa ):
__magic_name__: Any = value.astype(np.floataa )
batch_outputs[key].append(__snake_case )
return BatchFeature(__snake_case , tensor_type=__snake_case )
def lowerCamelCase__ ( self : Tuple , __snake_case : Union[Dict[str, np.ndarray], BatchFeature] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ) -> dict:
__magic_name__: List[str] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__magic_name__: List[Any] = len(__snake_case )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__: List[str] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__: str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__snake_case ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__magic_name__: int = np.ones(len(__snake_case ) , dtype=np.intaa )
if needs_to_be_padded:
__magic_name__: str = max_length - len(__snake_case )
if self.padding_side == "right":
if return_attention_mask:
__magic_name__: List[Any] = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__magic_name__: Union[str, Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__magic_name__: int = np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__magic_name__: Dict = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__magic_name__: Optional[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__magic_name__: int = np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Union[Dict[str, np.ndarray], BatchFeature] , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ) -> int:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__magic_name__: Dict = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__: Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__: Tuple = len(__snake_case ) > max_length
if needs_to_be_truncated:
__magic_name__: Any = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__magic_name__: List[Any] = processed_features["""attention_mask"""][:max_length]
return processed_features
def lowerCamelCase__ ( self : List[Any] , __snake_case : int=False , __snake_case : Tuple=None ) -> Optional[Any]:
# Get padding strategy
if padding is not False:
if padding is True:
__magic_name__: Optional[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__snake_case , __snake_case ):
__magic_name__: Tuple = PaddingStrategy(__snake_case )
elif isinstance(__snake_case , __snake_case ):
__magic_name__: Dict = padding
else:
__magic_name__: int = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
| 96 | 1 |
def match_pattern(input_string: str , pattern: str ) -> bool:
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__a = """aab"""
__a = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 627 |
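A few more probes of `match_pattern` above, showing how `*` and `.` interact:

assert match_pattern("aab", "c*a*b")    # c* matches empty, a* matches "aa"
assert match_pattern("abc", "a.c")      # . matches any single character
assert not match_pattern("abc", "a*c")  # "b" is never matched
assert match_pattern("", "x*y*")        # stars can erase the whole pattern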
from math import isqrt
def is_prime(number: int ) -> bool:
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution(max_prime: int = 10**6 ) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 627 | 1 |
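The candidate stream in `solution` above walks exactly the differences of consecutive cubes, (n+1)^3 - n^3 = 3n^2 + 3n + 1; a short illustrative check:

candidates = []
candidate, cube_index = 7, 1
while len(candidates) < 5:
    candidates.append(candidate)
    cube_index += 1
    candidate += 6 * cube_index

assert candidates == [(n + 1) ** 3 - n**3 for n in range(1, 6)]  # [7, 19, 37, 61, 91]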
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput ):
    latents: torch.FloatTensor
class VQModel(ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , in_channels: int = 3 , out_channels: int = 3 , down_block_types: Tuple[str] = ("DownEncoderBlock2D",) , up_block_types: Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels: Tuple[int] = (64,) , layers_per_block: int = 1 , act_fn: str = "silu" , latent_channels: int = 3 , sample_size: int = 32 , num_vq_embeddings: int = 256 , norm_num_groups: int = 32 , vq_embed_dim: Optional[int] = None , scaling_factor: float = 0.18215 , norm_type: str = "group" , ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
    @apply_forward_hook
    def encode( self , x: torch.FloatTensor , return_dict: bool = True ) -> VQEncoderOutput:
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
    @apply_forward_hook
    def decode( self , h: torch.FloatTensor , force_not_quantize: bool = False , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == "spatial" else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        h = self.encode(sample ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
| 664 |
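What `self.quantize` does conceptually in the model above: snap each latent vector to its nearest codebook entry, with a straight-through estimator for gradients. A minimal torch sketch (names are mine, not the diffusers VectorQuantizer):

import torch

def nearest_codebook(z: torch.Tensor, codebook: torch.Tensor) -> torch.Tensor:
    # z: (N, D) latents, codebook: (K, D) embeddings
    distances = torch.cdist(z, codebook)   # (N, K) pairwise L2 distances
    indices = distances.argmin(dim=1)      # index of the closest code per latent
    quantized = codebook[indices]
    # straight-through: forward uses the code, backward passes gradients to z
    return z + (quantized - z).detach()

z = torch.randn(4, 8)
codebook = torch.randn(256, 8)
print(nearest_codebook(z, codebook).shape)  # torch.Size([4, 8])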
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str , pytorch_dump_folder_path: str ):
    '''Convert the original Bort checkpoint (GluonNLP/MXNet) to a HuggingFace PyTorch checkpoint.'''
    bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=False , output_all_encodings=False , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__magic_name__ = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
__magic_name__ = os.path.join(get_home_dir() , "models" )
__magic_name__ = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ )
__magic_name__ = nlp.model.BERTModel(
lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , )
original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ )
__magic_name__ = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_model_config_dict = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(lowerCamelCase_ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_model_config_dict )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        layer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
        # self attention output
        self_output = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' )
        # intermediate
        intermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
        # output
        bert_output = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

# NOTE: any short English sentence works for this sanity check; this one is arbitrary.
sample_text = "A quick sanity-check sentence for the converted model."
input_ids = tokenizer.encode_plus(sample_text)["input_ids"]

# Get gluon output
gluon_input_ids = mx.nd.array([input_ids])
output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(pytorch_dump_folder_path)
hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
hf_bort_model.eval()

input_ids = tokenizer.encode_plus(sample_text, return_tensors="pt")
output_hf = hf_bort_model(**input_ids)[0]

gluon_layer = output_gluon[0].asnumpy()
hf_layer = output_hf[0].detach().numpy()

max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

if success:
    print("✔️ Both models output the same tensors")
else:
    print("❌ Both models do **NOT** output the same tensors")
    print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--bort_checkpoint_path', default=None, type=str, required=True, help='Path to the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 664 | 1 |
from __future__ import annotations
def mean(nums: list) -> float:
    """
    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 613 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
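# These helpers must be defined at module level: multiprocessing's default pickler
# can serialize top-level functions, but not lambdas or locally scoped closures.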
@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
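# Rule under test: map_nested only spawns worker processes when the iterable has at
# least parallel_min_length (16) items and num_proc > 1; otherwise it takes the
# sequential _single_map_nested path, and num_proc is capped at the number of items.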
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
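    # temp_seed seeds the chosen RNGs only for the duration of the `with` block and
    # restores the previous RNG state on exit, which is why out1 == out2 while the
    # unseeded third call (out3) is expected to diverge.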
@pytest.mark.parametrize('''input_data''' , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
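# iflatmap_unordered streams results in completion order rather than submission order,
# so the timing loop above receives each yielded item almost immediately instead of
# waiting for a worker's whole generator to finish.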
| 613 | 1 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
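# Illustrative CLI usage via fire (the script name and values here are examples only):
#   python save_randomly_initialized_version.py t5-small ./t5_random --d_model=64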
| 140 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e7, 8.2691656e4, 1.6521838e5],
                    [-5.7541704e-1, 3.9056022e0, 4.4011507e0],
                    [2.6047359e0, 1.5677652e0, -1.7324188e-1],
                ]
            ],
            device=torch_device,
        )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
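        # For example, an expected value of 1.0e8 against a measured 1.00005e8 gives a
        # ratio of ~0.99995, comfortably inside the 1 ± TOLERANCE (1e-3) bounds below.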
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
| 140 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
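# The sys.modules swap above is the standard lazy-import pattern: heavy framework
# submodules (the torch/TF/flax modeling files) are only imported on first attribute
# access, so importing the package itself stays cheap.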
| 707 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
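    # The slow (SentencePiece-based) and fast (Rust-based) tokenizers are meant to be
    # drop-in replacements for one another, so the parity test above compares both the
    # token strings and the encoded ids they produce for the same input.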
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 217 | 0 |
import comet # From: unbabel-comet
import torch
import datasets
A__: Union[str, Any] = datasets.logging.get_logger(__name__)
A__: int = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
A__: List[Any] = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
A__: Tuple = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 380 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
A__: List[Any] = logging.get_logger(__name__)
A__: str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__: List[Any] = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
A__: str = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
A__: int = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
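    # Rationale: JiebaPreTokenizer is a custom Python pre-tokenizer that the Rust
    # `tokenizers` backend cannot serialize, so pickling and saving temporarily swap in
    # the serializable BertPreTokenizer, and __setstate__ rebuilds the Jieba one.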
| 380 | 1 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptsTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match
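    # Illustrative matches: `open("data.txt")` is flagged, while
    # `open("data.txt", encoding="utf-8")` and `open(path, "rb")` are not, because the
    # lookahead rejects open(...) calls whose line mentions an encoding or a binary mode.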
    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 123 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
| 123 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
def UpperCAmelCase ( self : List[str] , __lowercase : Tuple=15 ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
# Simple input
__UpperCAmelCase : Optional[int] = """This is a simple input"""
__UpperCAmelCase : Any = ["""This is a simple input 1""", """This is a simple input 2"""]
__UpperCAmelCase : str = ("""This is a simple input""", """This is a pair""")
__UpperCAmelCase : List[str] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="""max_length""" , )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
pass
def UpperCAmelCase ( self : List[str] ) -> Any:
__UpperCAmelCase : Optional[Any] = ReformerTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
__UpperCAmelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [285, 46, 10, 170, 382] , )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__UpperCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCAmelCase ( self : Dict ) -> List[str]:
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
__UpperCAmelCase : Dict = """Hello World!"""
__UpperCAmelCase : Optional[int] = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def UpperCAmelCase ( self : int ) -> int:
__UpperCAmelCase : int = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
__UpperCAmelCase : str = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@require_torch
@slow
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
__UpperCAmelCase : Tuple = list(self.big_tokenizer.get_vocab().keys() )[:10]
__UpperCAmelCase : int = """ """.join(_UpperCamelCase )
__UpperCAmelCase : Optional[Any] = self.big_tokenizer.encode_plus(_UpperCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : Optional[int] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" )
__UpperCAmelCase : int = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
__UpperCAmelCase : Optional[int] = encoded_sequence["""input_ids"""].shape
__UpperCAmelCase : Optional[int] = ReformerModel(_UpperCamelCase )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCamelCase )
model(**_UpperCamelCase )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
__UpperCAmelCase : Optional[int] = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=_UpperCamelCase , sequences=_UpperCamelCase , )
| 63 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory( args ):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code )
class __a ( __lowerCamelCase ):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser: ArgumentParser ):
'''simple docstring'''
        download_parser = parser.add_parser("""download""" )
        download_parser.add_argument(
            """--cache-dir""" ,type=str ,default=None ,help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" ,action="""store_true""" ,help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" ,action="""store_true""" ,help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" ,)
download_parser.add_argument("""model""" ,type=_UpperCamelCase ,help="""Name of the model to download""" )
download_parser.set_defaults(func=_UpperCamelCase )
    def __init__( self , model: str , cache: str , force: bool , trust_remote_code: bool ):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ):
'''simple docstring'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
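# Example invocation, once this command is registered with the transformers CLI entry point:
#   transformers-cli download --cache-dir /tmp/hf-cache bert-base-uncased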
| 151 | 0 |
def binomial_coefficient( n , k ):
    result = 1  # to keep the calculated value
    # since C(n, k) == C(n, n - k), work with the smaller k
    if k > (n - k):
        k = n - k
    # calculate C(n, k) iteratively to avoid large intermediate factorials
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result


def catalan_number( node_count ):
    # the n-th Catalan number: C(2n, n) / (n + 1)
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)


def factorial( n ):
    if n < 0:
        raise ValueError("factorial() not defined for negative values" )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result


def binary_tree_count( node_count ):
    # trees on n labeled nodes: (number of shapes) * (number of key orderings)
    return catalan_number(node_count ) * factorial(node_count )


if __name__ == "__main__":
    node_count = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
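    # quick illustrative sanity check (an addition, not part of the original script):
    # 3 nodes give 5 tree shapes and 5 * 3! == 30 labeled binary trees
    assert catalan_number(3) == 5
    assert binary_tree_count(3) == 30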
| 530 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
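# `entropy` below is the Shannon entropy of softmax(x) along dim 1, computed without
# materializing the probabilities: with A = sum_j exp(x_j) and p_i = exp(x_i) / A,
# H(p) = log(A) - (sum_i x_i * exp(x_i)) / A.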
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
snake_case__ = torch.exp(__lowerCAmelCase )
snake_case__ = torch.sum(__lowerCAmelCase , dim=1 ) # sum of exp(x_i)
snake_case__ = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(__lowerCAmelCase ) - B / A
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , lowerCamelCase ):
super().__init__()
snake_case__ = config.output_attentions
snake_case__ = config.output_hidden_states
snake_case__ = nn.ModuleList([BertLayer(lowerCamelCase ) for _ in range(config.num_hidden_layers )] )
snake_case__ = nn.ModuleList([BertHighway(lowerCamelCase ) for _ in range(config.num_hidden_layers )] )
snake_case__ = [-1 for _ in range(config.num_hidden_layers )]
def A_ ( self , lowerCamelCase ):
if (type(lowerCamelCase ) is float) or (type(lowerCamelCase ) is int):
for i in range(len(self.early_exit_entropy ) ):
snake_case__ = x
else:
snake_case__ = x
def A_ ( self , lowerCamelCase ):
snake_case__ = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def A_ ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
snake_case__ = ()
snake_case__ = ()
snake_case__ = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
snake_case__ = all_hidden_states + (hidden_states,)
snake_case__ = layer_module(
lowerCamelCase , lowerCamelCase , head_mask[i] , lowerCamelCase , lowerCamelCase )
snake_case__ = layer_outputs[0]
if self.output_attentions:
snake_case__ = all_attentions + (layer_outputs[1],)
snake_case__ = (hidden_states,)
if self.output_hidden_states:
snake_case__ = current_outputs + (all_hidden_states,)
if self.output_attentions:
snake_case__ = current_outputs + (all_attentions,)
snake_case__ = self.highway[i](lowerCamelCase )
# logits, pooled_output
if not self.training:
snake_case__ = highway_exit[0]
snake_case__ = entropy(lowerCamelCase )
snake_case__ = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
snake_case__ = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
snake_case__ = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
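                        # This layer's confidence is high enough (entropy below its threshold):
                        # abort the remaining layers via an exception that the task head catches.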
raise HighwayException(lowerCamelCase , i + 1 )
else:
snake_case__ = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
snake_case__ = all_hidden_states + (hidden_states,)
snake_case__ = (hidden_states,)
if self.output_hidden_states:
snake_case__ = outputs + (all_hidden_states,)
if self.output_attentions:
snake_case__ = outputs + (all_attentions,)
snake_case__ = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'The Bert Model transformer with early exiting (DeeBERT). ' , __UpperCamelCase , )
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
def __init__( self , lowerCamelCase ):
super().__init__(lowerCamelCase )
snake_case__ = config
snake_case__ = BertEmbeddings(lowerCamelCase )
snake_case__ = DeeBertEncoder(lowerCamelCase )
snake_case__ = BertPooler(lowerCamelCase )
self.init_weights()
def A_ ( self ):
self.encoder.init_highway_pooler(self.pooler )
def A_ ( self ):
return self.embeddings.word_embeddings
def A_ ( self , lowerCamelCase ):
snake_case__ = value
def A_ ( self , lowerCamelCase ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(lowerCamelCase )
@add_start_docstrings_to_model_forward(lowerCamelCase )
def A_ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
snake_case__ = input_ids.size()
elif inputs_embeds is not None:
snake_case__ = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
snake_case__ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case__ = torch.ones(lowerCamelCase , device=lowerCamelCase )
if encoder_attention_mask is None:
snake_case__ = torch.ones(lowerCamelCase , device=lowerCamelCase )
if token_type_ids is None:
snake_case__ = torch.zeros(lowerCamelCase , dtype=torch.long , device=lowerCamelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case__ = self.get_extended_attention_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
snake_case__ = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
snake_case__ = encoder_attention_mask[:, None, None, :]
snake_case__ = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
snake_case__ = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case__ = self.get_head_mask(lowerCamelCase , self.config.num_hidden_layers )
snake_case__ = self.embeddings(
input_ids=lowerCamelCase , position_ids=lowerCamelCase , token_type_ids=lowerCamelCase , inputs_embeds=lowerCamelCase )
snake_case__ = self.encoder(
lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
snake_case__ = encoder_outputs[0]
snake_case__ = self.pooler(lowerCamelCase )
snake_case__ = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
def __init__( self , lowerCamelCase , lowerCamelCase ):
snake_case__ = message
snake_case__ = exit_layer # start from 1!
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , lowerCamelCase ):
super().__init__()
snake_case__ = BertPooler(lowerCamelCase )
snake_case__ = nn.Dropout(config.hidden_dropout_prob )
snake_case__ = nn.Linear(config.hidden_size , config.num_labels )
def A_ ( self , lowerCamelCase ):
# Pooler
snake_case__ = encoder_outputs[0]
snake_case__ = self.pooler(lowerCamelCase )
# "return" pooler_output
# BertModel
snake_case__ = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
snake_case__ = bmodel_output[1]
snake_case__ = self.dropout(lowerCamelCase )
snake_case__ = self.classifier(lowerCamelCase )
return logits, pooled_output
@add_start_docstrings(
'Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ' , __UpperCamelCase , )
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
def __init__( self , lowerCamelCase ):
super().__init__(lowerCamelCase )
snake_case__ = config.num_labels
snake_case__ = config.num_hidden_layers
snake_case__ = DeeBertModel(lowerCamelCase )
snake_case__ = nn.Dropout(config.hidden_dropout_prob )
snake_case__ = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCamelCase )
def A_ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=-1 , lowerCamelCase=False , ):
snake_case__ = self.num_layers
try:
snake_case__ = self.bert(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , position_ids=lowerCamelCase , head_mask=lowerCamelCase , inputs_embeds=lowerCamelCase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
snake_case__ = outputs[1]
snake_case__ = self.dropout(lowerCamelCase )
snake_case__ = self.classifier(lowerCamelCase )
snake_case__ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case__ = e.message
snake_case__ = e.exit_layer
snake_case__ = outputs[0]
if not self.training:
snake_case__ = entropy(lowerCamelCase )
snake_case__ = []
snake_case__ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case__ = MSELoss()
snake_case__ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case__ = CrossEntropyLoss()
snake_case__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case__ = []
for highway_exit in outputs[-1]:
snake_case__ = highway_exit[0]
if not self.training:
highway_logits_all.append(lowerCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case__ = MSELoss()
snake_case__ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case__ = CrossEntropyLoss()
snake_case__ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(lowerCamelCase )
if train_highway:
snake_case__ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case__ = (loss,) + outputs
if not self.training:
snake_case__ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case__ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 530 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
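# These tests run deliberately tiny checkpoints (e.g. sshleifer/tiny-gpt2) so each
# benchmark pass finishes quickly while still exercising the full measurement API.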
@require_torch
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
_UpperCAmelCase = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = 'sgugger/tiny-distilbert-classification'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , only_pretrain_model=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , torchscript=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , fpaa=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
# set architectures equal to `None`
_UpperCAmelCase = None
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tinier_bart'
_UpperCAmelCase = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tinier_bart'
_UpperCAmelCase = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , save_to_csv=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'train_time.csv' ) , env_info_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'env.csv' ) , multi_process=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
benchmark.run()
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'env.csv' ) ).exists() )
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(_SCREAMING_SNAKE_CASE ):
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'sequential' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'cumulative' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'current' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_SCREAMING_SNAKE_CASE , 'log.txt' ) , log_print=_SCREAMING_SNAKE_CASE , trace_memory_line_by_line=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'log.txt' ) ).exists() )
| 618 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 3_2,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_0_0_0,
'''block_out_channels''': [3_2, 6_4],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 6_4,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_0_0_0,
'''block_out_channels''': [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
'''attention_head_dim''': 6_4,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 2_5_6,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
'''attention_head_dim''': 6_4,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 4_0,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 2_0_1,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 1_5_1,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
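# Scheduler-config naming (inferred from the checkpoint-name checks in the __main__ block
# below): "cd" (consistency distillation) checkpoints use the 40-step schedule, while "ct"
# (consistency training) checkpoints use 201 steps for ImageNet-64 and 151 for LSUN-256.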
def strabool( v ):
    '''Argparse-friendly string-to-bool conversion; real booleans pass through unchanged.'''
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected' )
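# e.g. strabool("yes") -> True, strabool("0") -> False, strabool(True) -> True.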
def convert_resnet( checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
    # Parameter and key names are restored from their uses below; the diffusers-side names
    # (norm1/conv1/time_emb_proj/norm2/conv2/conv_shortcut) follow ResnetBlock2D's naming.
    new_checkpoint[f'''{new_prefix}.norm1.weight'''] = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
    new_checkpoint[f'''{new_prefix}.norm1.bias'''] = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
    new_checkpoint[f'''{new_prefix}.conv1.weight'''] = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
    new_checkpoint[f'''{new_prefix}.conv1.bias'''] = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
    new_checkpoint[f'''{new_prefix}.time_emb_proj.weight'''] = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
    new_checkpoint[f'''{new_prefix}.time_emb_proj.bias'''] = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
    new_checkpoint[f'''{new_prefix}.norm2.weight'''] = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
    new_checkpoint[f'''{new_prefix}.norm2.bias'''] = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
    new_checkpoint[f'''{new_prefix}.conv2.weight'''] = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
    new_checkpoint[f'''{new_prefix}.conv2.bias'''] = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
    if has_skip:
        new_checkpoint[f'''{new_prefix}.conv_shortcut.weight'''] = checkpoint[f'''{old_prefix}.skip_connection.weight''']
        new_checkpoint[f'''{new_prefix}.conv_shortcut.bias'''] = checkpoint[f'''{old_prefix}.skip_connection.bias''']
    return new_checkpoint
def convert_attention( checkpoint , new_checkpoint , old_prefix , new_prefix , attention_dim=None ):
    # Diffusers-side key names (group_norm / to_q / to_k / to_v / to_out.0) follow the
    # Attention module's naming; they are restored here on that assumption.
    weight_q , weight_k , weight_v = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
    bias_q , bias_k , bias_v = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
    new_checkpoint[f'''{new_prefix}.group_norm.weight'''] = checkpoint[f'''{old_prefix}.norm.weight''']
    new_checkpoint[f'''{new_prefix}.group_norm.bias'''] = checkpoint[f'''{old_prefix}.norm.bias''']
    new_checkpoint[f'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[f'''{new_prefix}.to_out.0.bias'''] = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
def con_pt_to_diffuser( checkpoint_path: str , unet_config ):
    # Variable names are restored from their later uses; the diffusers-side key names
    # (time_embedding.*, conv_in, conv_norm_out, conv_out) follow UNet2DModel's module naming.
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    new_checkpoint = {}
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['class_embedding.weight'] = checkpoint['label_emb.weight']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
                new_prefix = f'''down_blocks.{i}.resnets.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
                new_prefix = f'''down_blocks.{i}.resnets.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                new_prefix = f'''down_blocks.{i}.attentions.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
        if i != len(down_block_types ) - 1:
            new_prefix = f'''down_blocks.{i}.downsamplers.0'''
            old_prefix = f'''input_blocks.{current_layer}.0'''
            new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    current_layer = 0
    up_block_types = unet_config['up_block_types']
    for i, layer_type in enumerate(up_block_types ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = f'''up_blocks.{i}.resnets.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = f'''up_blocks.{i}.upsamplers.0'''
                old_prefix = f'''output_blocks.{current_layer-1}.1'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = f'''up_blocks.{i}.resnets.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                new_prefix = f'''up_blocks.{i}.attentions.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = f'''up_blocks.{i}.upsamplers.0'''
                old_prefix = f'''output_blocks.{current_layer-1}.2'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
    parser.add_argument(
        '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
    )
    parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f'''Checkpoint: {ckpt_name}''')
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
    if not args.class_cond:
        unet_config['''num_class_embeds'''] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 618 | 1 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_lowerCAmelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Any:
"""simple docstring"""
super().__init__(*lowerCamelCase , **lowerCamelCase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowercase__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None ) -> str:
"""simple docstring"""
snake_case__ : Dict = {}
snake_case__ : Tuple = {}
if prompt is not None:
snake_case__ : List[str] = prompt
if generate_kwargs is not None:
snake_case__ : Any = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
snake_case__ : Dict = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
snake_case__ : int = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , lowerCamelCase , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
return super().__call__(lowerCamelCase , **lowerCamelCase )
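        # Illustrative usage (assuming an image-captioning checkpoint is available):
        #   captioner = pipeline("image-to-text", model="<vision2seq-checkpoint>")
        #   captioner("path/to/image.png")  # -> [{"generated_text": "..."}]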
def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> Dict:
"""simple docstring"""
snake_case__ : List[Any] = load_image(lowerCamelCase )
if prompt is not None:
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCamelCase )} - but expected a single string. '''
'''Note also that one single text can be provided for conditional image to text generation.''' )
snake_case__ : int = self.model.config.model_type
if model_type == "git":
snake_case__ : Optional[int] = self.image_processor(images=lowerCamelCase , return_tensors=self.framework )
snake_case__ : Dict = self.tokenizer(text=lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids
snake_case__ : List[Any] = [self.tokenizer.cls_token_id] + input_ids
snake_case__ : List[Any] = torch.tensor(lowerCamelCase ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
snake_case__ : Optional[int] = self.image_processor(images=lowerCamelCase , header_text=lowerCamelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
snake_case__ : List[Any] = self.image_processor(images=lowerCamelCase , return_tensors=self.framework )
snake_case__ : Any = self.tokenizer(lowerCamelCase , return_tensors=self.framework )
model_inputs.update(lowerCamelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
snake_case__ : str = self.image_processor(images=lowerCamelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
snake_case__ : Optional[int] = None
return model_inputs
def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> Dict:
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , lowerCamelCase )
and all(x is None for x in model_inputs['''input_ids'''] )
):
snake_case__ : Tuple = None
if generate_kwargs is None:
snake_case__ : Union[str, Any] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
snake_case__ : List[str] = model_inputs.pop(self.model.main_input_name )
snake_case__ : Dict = self.model.generate(lowerCamelCase , **lowerCamelCase , **lowerCamelCase )
return model_outputs
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Tuple = []
for output_ids in model_outputs:
snake_case__ : Dict = {
'''generated_text''': self.tokenizer.decode(
lowerCamelCase , skip_special_tokens=lowerCamelCase , )
}
records.append(lowerCamelCase )
return records
| 694 |
'''simple docstring'''
import socket
def main():
    # TCP client: connect to a file server on this host and save its payload to disk.
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12_312
    sock.connect((host, port) )
    sock.send(b'''Hello server!''' )
    with open('''Received_file''' , '''wb''' ) as out_file:
        print('''File opened''' )
        print('''Receiving data...''' )
        while True:
            data = sock.recv(10_24 )
            if not data:
                break
            out_file.write(data )
    print('''Successfully received the file''' )
    sock.close()
    print('''Connection closed''' )
if __name__ == "__main__":
main()
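# A minimal matching server sketch (an assumption -- the server script is not part of this
# file): it must listen on the same host/port, read the greeting, then stream a file back.
#
#   import socket
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12_312))
#   server.listen(1)
#   conn, _addr = server.accept()
#   print(conn.recv(10_24))             # b'Hello server!'
#   with open('some_file', 'rb') as f:  # any file to serve; the name is illustrative
#       conn.sendfile(f)
#   conn.close()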
| 694 | 1 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial( n: int ) -> int:
    """Return the sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n ) )


def solution() -> int:
    """Project Euler 34: sum of all numbers equal to the sum of the factorials of their digits."""
    # 7 * 9! + 1 is a safe upper bound: eight digits contribute at most 8 * 9! = 2903040,
    # which has only seven digits, so no candidate can have eight or more digits.
    limit = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
if __name__ == "__main__":
print(f"{solution() = }")
| 442 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
lowerCAmelCase_ : int = {'target_lang': 'fi', 'source_lang': 'en'}
lowerCAmelCase_ : str = '>>zh<<'
lowerCAmelCase_ : List[str] = 'Helsinki-NLP/'
if is_torch_available():
lowerCAmelCase_ : Dict = 'pt'
elif is_tf_available():
lowerCAmelCase_ : Union[str, Any] = 'tf'
else:
lowerCAmelCase_ : int = 'jax'
@require_sentencepiece
class SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
__magic_name__ : Dict = MarianTokenizer
__magic_name__ : Any = False
__magic_name__ : str = True
def lowercase_ ( self : Any ):
'''simple docstring'''
super().setUp()
a_ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
a_ : Optional[int] = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
a_ : List[str] = Path(self.tmpdirname )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
a_ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : Tuple , **lowercase__ : int ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def lowercase_ ( self : int , lowercase__ : int ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def lowercase_ ( self : List[str] ):
'''simple docstring'''
a_ : Optional[int] = """</s>"""
a_ : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
a_ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowercase__ ) , 9 )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowercase_ ( self : str ):
'''simple docstring'''
a_ : str = MarianTokenizer.from_pretrained(F"{ORG_NAME}opus-mt-en-de" )
a_ : Any = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
a_ : str = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(lowercase__ , batch.input_ids[0] )
a_ : Union[str, Any] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowercase__ )
a_ : Union[str, Any] = [x.name for x in Path(lowercase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , lowercase__ )
MarianTokenizer.from_pretrained(lowercase__ )
def lowercase_ ( self : str ):
'''simple docstring'''
a_ : int = self.get_tokenizer()
a_ : Dict = tok(
["""I am a small frog""" * 1000, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
a_ : List[str] = self.get_tokenizer()
a_ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
a_ : Optional[int] = {"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowercase_ ( self : int ):
'''simple docstring'''
a_ : Tuple = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
a_ : Tuple = """Tämä on testi"""
a_ : Union[str, Any] = """This is a test"""
a_ : Union[str, Any] = [76, 7, 2047, 2]
a_ : Optional[int] = [69, 12, 11, 940, 2]
a_ : Optional[Any] = tokenizer(lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
a_ : Optional[int] = tokenizer(text_target=lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
a_ : str = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
| 442 | 1 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path" , type=str , default="data/dump.txt" , help="The path to the data." )
    parser.add_argument("--tokenizer_type" , type=str , default="bert" , choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name" , type=str , default="bert-base-uncased" , help="The tokenizer to use." )
    parser.add_argument("--dump_file" , type=str , default="data/dump" , help="The dump file prefix." )
    args = parser.parse_args()
    logger.info(F"Loading Tokenizer ({args.tokenizer_name})" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
    logger.info(F"Loading text from {args.file_path}" )
    with open(args.file_path , "r" , encoding="utf8" ) as fp:
        data = fp.readlines()
    logger.info("Start encoding" )
    logger.info(F"{len(data )} examples to process." )
    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = F"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
            start = time.time()
    logger.info("Finished binarization" )
    logger.info(F"{len(data )} examples processed." )
    dp_file = F"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # token ids fit in 16 bits when the vocabulary is smaller than 2**16, halving the dump size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F"Dump to {dp_file}" )
    with open(dp_file , "wb" ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
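
# A hedged sketch (not part of the original script) of reading the dump back.
# The file name is illustrative -- it is whatever `--dump_file` and
# `--tokenizer_name` produce. Ids are stored as uint16 only when the vocabulary
# fits in 2**16 entries, which halves the size versus int32.
import pickle

import numpy as np

with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
    sequences = pickle.load(handle)

print(f"{len(sequences)} tokenized sequences loaded")
print(np.asarray(sequences[0]).dtype)  # uint16 for vocabularies below 65536 ids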
| 524 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))

    print("Initial List")
    print(*arr)

    arr = odd_even_transposition(arr)

    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
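
# A single-process reference implementation (a sketch, not part of the original
# module) for sanity-checking the parallel version on small inputs: n
# alternating odd/even passes over an n-element list are guaranteed to sort it.
def odd_even_transposition_serial(arr: list) -> list:
    arr = list(arr)
    n = len(arr)
    for pass_num in range(n):
        # even passes compare pairs (0,1), (2,3), ...; odd passes (1,2), (3,4), ...
        for i in range(pass_num % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_serial(list(range(10, 0, -1))) == list(range(1, 11))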
| 524 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert '__pycache__' not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert '__pycache__' not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
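
# A hedged usage sketch (not part of the test file); the 'squad' name comes
# from the parametrize lists above and both calls require network access.
from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names('squad')
print(configs)  # expected to contain 'plain_text'
print(get_dataset_split_names('squad', config_name='plain_text'))  # ['train', 'validation']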
| 405 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the result"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
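
# A hedged usage sketch (not part of the original module), assuming an
# interactive terminal: arrow or number keys move the cursor, enter confirms,
# and run() returns the selected index.
if __name__ == "__main__":
    menu = BulletMenu("Which framework do you want to use?", ["pytorch", "tensorflow", "jax"])
    selected = menu.run(default_choice=0)
    print(f"Selected option index: {selected}")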
| 405 | 1 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
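
# A quick cross-check (not part of the original module) against the standard
# library: the backtracking above enumerates exactly the n! orderings that
# itertools.permutations yields.
from itertools import permutations
from math import factorial

items = [3, 1, 2, 4]
assert len(list(permutations(items))) == factorial(len(items))  # 4! = 24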
| 502 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
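
# A short usage sketch (not part of the original module). The attribute_map
# above lets generic code read hidden_size / num_hidden_layers while CTRL keeps
# its historical n_embd / n_layer names.
config = CTRLConfig(n_layer=2, n_head=4)
assert config.num_hidden_layers == config.n_layer == 2
assert config.num_attention_heads == config.n_head == 4
print(config.hidden_size)  # resolves to n_embd, 1280 by default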
| 502 | 1 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
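
# A worked example (not part of the original solution): sum_of_divisors returns
# the sum of *proper* divisors, so the classic amicable pair (220, 284)
# satisfies d(220) = 284 and d(284) = 220. `solution` sums all amicable numbers
# below the limit; the `sum_of_divisors(i) != i` clause excludes perfect numbers.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220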
| 408 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')

FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
# fmt: off
_UpperCAmelCase ={"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase,  # the expected-encoding dict kept verbatim above
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
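
# A hedged parity sketch (not part of the test file) mirroring what the tests
# above automate; it downloads the "camembert-base" checkpoint from the Hub.
from transformers import CamembertTokenizer, CamembertTokenizerFast

slow_tok = CamembertTokenizer.from_pretrained("camembert-base")
fast_tok = CamembertTokenizerFast.from_pretrained("camembert-base")
text = "J'aime le camembert !"
assert slow_tok.encode(text) == fast_tok.encode(text)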
| 408 | 1 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
'''emoji''': True,
},
}
]
total_num_failed = 0
for log in Path().glob('''*.log'''):
    section_num_failed = 0
with open(log, '''r''') as f:
for line in f:
            line = json.loads(line)
if line.get('''nodeid''', '''''') != "":
                test = line["nodeid"]
if line.get('''duration''', None) is not None:
                duration = f'{line["duration"]:.4f}'
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
print(f'### {message}')
else:
    message = "No failed tests! 🤗"
print(f'## {message}')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
        action_button = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
        date_report = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
                test_class = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
                        test_class = row[0]
else:
                        row[0] = ""
                payload = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
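
# A hedged sketch of the input this script consumes (not part of the original
# file): each *.log is assumed to be a JSON-lines pytest report, e.g. produced
# by the pytest-reportlog plugin, with one object per line.
import json

sample_line = '{"nodeid": "tests/test_metrics.py::test_accuracy", "duration": 0.0123, "outcome": "failed"}'
record = json.loads(sample_line)
print(record["nodeid"], record["duration"], record["outcome"])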
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
'''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
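
# A self-contained sketch (not part of the original __init__) of the lazy-import
# idea used above: attribute access triggers the real submodule import, so
# importing the package stays cheap until a symbol is actually needed.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(name)
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[name]}")
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value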
| 399 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
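
# A hedged end-to-end sketch (not part of the test file) of the same forward
# pass outside the test harness; it needs network access, the vision extras,
# and uses the same toy input_ids/bbox values as the integration test above.
import torch
from PIL import Image
from transformers import LayoutLMv3ImageProcessor, LayoutLMv3Model

model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
processor = LayoutLMv3ImageProcessor(apply_ocr=False)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
outputs = model(
    input_ids=torch.tensor([[1, 2]]),
    bbox=torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0),
    pixel_values=pixel_values,
)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 199, 768])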
| 325 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
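
# A non-interactive usage sketch (not part of the original script); it hits the
# live openlibrary.org API with the module's default example ISBN.
book = summarize_book(get_openlibrary_data("isbn/0140328726"))
for key, value in book.items():
    print(f"{key}: {value}")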
| 325 | 1 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """
    Removes segments. Positive values shave the first segments, negative shave the last segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )

    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
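
# A small self-contained check (not part of the original script) of the
# renaming helpers above: shave_segments drops leading dot-separated segments,
# and renew_resnet_paths maps LDM resnet parameter names to diffusers names.
assert shave_segments("input_blocks.3.0.in_layers.0.weight", 2) == "0.in_layers.0.weight"
mapping = renew_resnet_paths(["in_layers.0.weight", "emb_layers.1.bias"])
assert mapping[0]["new"] == "norm1.weight"
assert mapping[1]["new"] == "time_emb_proj.bias"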
| 706 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.emoji_file, 'w') as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = 'こんにちは、世界。 こんばんは、㔺界。'
        expected_token = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagoftoken(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        text = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        expected_text = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        tokens = tokenizer.encode(text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input_token(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')

        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        expected_text = 'こんにちは、世界。こんばんは、世界。😀'
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode('', prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)
    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')

        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer('', prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)
@slow
    def test_prefix_tokens( self ):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        x_token_1 = tokenizer.encode('あンいワ' )
        x_token_2 = tokenizer.encode('' , prefix_text='あンいワ' )
        x_token_3 = tokenizer.encode('いワ' , prefix_text='あン' )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_2 ) )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_3 ) )
        self.assertNotEqual(x_token_1 , x_token_2 )
        self.assertNotEqual(x_token_1 , x_token_3 )
        self.assertEqual(x_token_2[1] , x_token_2[-1] ) # SEG token
        self.assertEqual(x_token_3[1] , x_token_3[3] ) # SEG token
@slow
    def test_batch_encode( self ):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        batch_text = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        x_token = tokenizer(batch_text , padding=True )
        x_token_a = tokenizer.batch_encode_plus(batch_text , padding=True )
        # fmt: off
        expected_input_ids = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
        expected_token_type_ids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attention_mask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , expected_input_ids )
        self.assertListEqual(x_token.token_type_ids , expected_token_type_ids )
        self.assertListEqual(x_token.attention_mask , expected_attention_mask )
        self.assertListEqual(x_token_a.input_ids , expected_input_ids )
        self.assertListEqual(x_token_a.token_type_ids , expected_token_type_ids )
        self.assertListEqual(x_token_a.attention_mask , expected_attention_mask )
def __lowerCamelCase ( self ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCamelCase ( self ):
# tokenizer has no padding token
pass
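# A minimal usage sketch of the prefix mechanism exercised above (an illustration, assuming
# the public 'Tanrei/GPTSAN-japanese' checkpoint is reachable): tokens coming from
# `prefix_text` receive token_type_id 1, and a SEG token separates prefix from input.
# from transformers import GPTSanJapaneseTokenizer
# tok = GPTSanJapaneseTokenizer.from_pretrained('Tanrei/GPTSAN-japanese')
# enc = tok('こんばんは、世界。', prefix_text='こんにちは、')
# print(enc.input_ids, enc.token_type_ids)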
| 345 | 0 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = '''base_with_context'''
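# Note on the porting helpers below: T5X/Flax stores dense kernels as (in_features, out_features),
# while torch.nn.Linear expects (out_features, in_features), hence the systematic `.T` transposes
# when wrapping checkpoint arrays in nn.Parameter.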
def load_notes_encoder( weights , model ):
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=UpperCAmelCase__ )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F'''layers_{lyr_num}''']
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
        attention_weights = ly_weight["""attention"""]
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder( weights , model ):
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=UpperCAmelCase__ )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F'''layers_{lyr_num}''']
        attention_weights = ly_weight["""attention"""]
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder( weights , model ):
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=UpperCAmelCase__ )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[F'''layers_{lyr_num}''']
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
        attention_weights = ly_weight["""self_attention"""]
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
        attention_weights = ly_weight["""MultiHeadDotProductAttention_0"""]
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main( args ):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
"""from __gin__ import dynamic_registration""",
"""from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
"""diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
"""diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
]
    gin_file = os.path.join(args.checkpoint_path , """..""" , """config.gin""" )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" )
    notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
    continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
    decoder = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
    args = parser.parse_args()
main(args)
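    # Example invocation (hypothetical local paths; the checkpoint layout must match the
    # music_spectrogram_diffusion release this script was written against):
    #   python convert_music_spectrogram_to_diffusers.py \
    #       --checkpoint_path base_with_context/checkpoint_500000 \
    #       --output_path ./spectrogram_diffusion_pipe --save True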
| 272 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            '''help''': (
                '''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(MODEL_TYPES )} , )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            '''help''': (
                '''Override some existing default config settings when a model is trained from scratch. Example: '''
                '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    def __post_init__( self ):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                """--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_file: Optional[str] = field(default=None , metadata={'''help''': '''The input training data file (a text file).'''} )
    validation_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
    validation_ref_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    validation_split_percentage: Optional[int] = field(
        default=5 , metadata={
            '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
        } , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated. Default to the max input length of the model.'''
            )
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    mlm_probability: float = field(
        default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
    pad_to_max_length: bool = field(
        default=False , metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        } , )
    def __post_init__( self ):
        if self.train_file is not None:
            extension = self.train_file.split(""".""" )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(""".""" )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references( dataset , ref_file ):
    with open(ref_file , """r""" , encoding="""utf-8""" ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["""chinese_ref"""] = refs
    return Dataset.from_dict(dataset_dict )
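# The ref file consumed above holds one JSON-encoded list per example; e.g. a line like
# "[2, 3]" marks the sub-token positions that continue a whole Chinese word, which
# DataCollatorForWholeWordMask then masks together (format inferred from the usage here).
assert json.loads("[2, 3]") == [2, 3]  # tiny illustration of the per-line parsing step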
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCAmelCase__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            datasets["""validation"""] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F'''train[:{data_args.validation_split_percentage}%]''' , )
            datasets["""train"""] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F'''train[{data_args.validation_split_percentage}%:]''' , )
else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["""train"""] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["""validation"""] = data_args.validation_file
        extension = data_args.train_file.split(""".""" )[-1]
        if extension == "txt":
            extension = """text"""
        datasets = load_dataset(extension , data_files=data_files )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
    tokenizer_kwargs = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **tokenizer_kwargs )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **tokenizer_kwargs )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        model = AutoModelForMaskedLM.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["""train"""].column_names
    else:
        column_names = datasets["""validation"""].column_names
    text_column_name = """text""" if """text""" in column_names else column_names[0]
    padding = """max_length""" if data_args.pad_to_max_length else False
    def tokenize_function(examples ):
        # Remove empty lines
        examples["""text"""] = [line for line in examples["""text"""] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples["""text"""] , padding=padding , truncation=True , max_length=data_args.max_seq_length )
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
        tokenized_datasets["""train"""] = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
        tokenized_datasets["""validation"""] = add_chinese_references(
            tokenized_datasets["""validation"""] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model() # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["""eval_loss"""] )
        results["""perplexity"""] = perplexity
        output_eval_file = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
return results
def _mp_fn( index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
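    # Example invocation (hypothetical paths/files; argument names mirror the dataclasses above):
    #   python run_mlm_wwm.py --model_name_or_path bert-base-chinese \
    #       --train_file train.txt --train_ref_file train_ref.txt \
    #       --do_train --output_dir ./mlm_wwm_out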
| 272 | 1 |
from math import factorial, pi
def maclaurin_sin( theta , accuracy = 3_0 ):
    if not isinstance(theta , (int, float) ):
        raise ValueError("maclaurin_sin() requires either an int or float for theta" )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )
def maclaurin_cos( theta , accuracy = 3_0 ):
    if not isinstance(theta , (int, float) ):
        raise ValueError("maclaurin_cos() requires either an int or float for theta" )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )
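# Both helpers truncate the Maclaurin expansions after `accuracy` terms, having first
# reduced theta modulo 2*pi:
#   sin(theta) = sum_{r>=0} (-1)**r * theta**(2r+1) / (2r+1)!
#   cos(theta) = sum_{r>=0} (-1)**r * theta**(2r)   / (2r)!
# A quick sanity check against the standard library (a sketch, assuming the default 30 terms):
import math
assert abs(maclaurin_sin(10) - math.sin(10)) < 1e-9
assert abs(maclaurin_cos(5) - math.cos(5)) < 1e-9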
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
| 313 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput ):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module ):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 2_56)
    dtype: jnp.dtype = jnp.float32
    def setup( self ) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv1 )
            conv2 = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv2 )
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , conditioning ) -> jnp.ndarray:
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )
        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )
        embedding = self.conv_out(embedding )
        return embedding
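# Shape intuition for the embedder above (an added sketch, not part of the original file): with
# the default block_out_channels (16, 32, 96, 256) and NHWC inputs, the three strided convs halve
# the spatial resolution three times, so a (1, 512, 512, 3) conditioning image comes out as a
# (1, 64, 64, conditioning_embedding_channels) feature map.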
@flax_register_to_config
class FlaxControlNetModel(nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (3_20, 6_40, 12_80, 12_80)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 12_80
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 2_56)
    def init_weights( self , rng ) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond )["params"]
    def setup( self ) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(controlnet_block )
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
            for _ in range(self.layers_per_block ):
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , controlnet_cond , conditioning_scale: float = 1.0 , return_dict: bool = True , train: bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond , axis=1 )
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        controlnet_cond = jnp.transpose(controlnet_cond , (0, 2, 3, 1) )
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond )
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlock2D ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples , self.controlnet_down_blocks ):
            down_block_res_sample = controlnet_block(down_block_res_sample )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample )
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples , mid_block_res_sample=mid_block_res_sample )
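# Hedged initialization sketch (uses only names defined in this file; an illustration, not
# part of the reference implementation):
# import jax
# controlnet = FlaxControlNetModel(sample_size=32)
# params = controlnet.init_weights(jax.random.PRNGKey(0))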
| 313 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : Tuple = StableDiffusionPanoramaPipeline
__lowerCAmelCase : int = TEXT_TO_IMAGE_PARAMS
__lowerCAmelCase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
__lowerCAmelCase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
__lowerCAmelCase : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
'''simple docstring'''
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
        scheduler = DDIMScheduler()
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        generator = torch.manual_seed(seed )
        inputs = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Dict = self.get_dummy_components()
UpperCamelCase : str = StableDiffusionPanoramaPipeline(**_A )
UpperCamelCase : Optional[int] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCamelCase : str = self.get_dummy_inputs(_A )
UpperCamelCase : Any = sd_pipe(**_A ).images
UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase : Optional[Any] = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self ):
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : int = self.get_dummy_components()
UpperCamelCase : List[Any] = StableDiffusionPanoramaPipeline(**_A )
UpperCamelCase : List[str] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCamelCase : List[Any] = self.get_dummy_inputs(_A )
UpperCamelCase : List[Any] = """french fries"""
UpperCamelCase : int = sd_pipe(**_A , negative_prompt=_A )
UpperCamelCase : Optional[int] = output.images
UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase : Optional[int] = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Dict = self.get_dummy_components()
UpperCamelCase : Any = StableDiffusionPanoramaPipeline(**_A )
UpperCamelCase : List[str] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCamelCase : int = self.get_dummy_inputs(_A )
UpperCamelCase : Union[str, Any] = sd_pipe(**_A , view_batch_size=2 )
UpperCamelCase : Tuple = output.images
UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase : Union[str, Any] = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self ):
'''simple docstring'''
UpperCamelCase : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : List[str] = self.get_dummy_components()
UpperCamelCase : Optional[Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" )
UpperCamelCase : Optional[int] = StableDiffusionPanoramaPipeline(**_A )
UpperCamelCase : str = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCamelCase : Any = self.get_dummy_inputs(_A )
UpperCamelCase : Optional[Any] = sd_pipe(**_A ).images
UpperCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase : Any = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Optional[int] = self.get_dummy_components()
UpperCamelCase : Any = PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , skip_prk_steps=_A )
UpperCamelCase : Any = StableDiffusionPanoramaPipeline(**_A )
UpperCamelCase : Dict = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCamelCase : List[Any] = self.get_dummy_inputs(_A )
UpperCamelCase : int = sd_pipe(**_A ).images
UpperCamelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase : List[Any] = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , seed=0 ):
        '''simple docstring'''
        generator = torch.manual_seed(seed )
        inputs = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = """stabilityai/stable-diffusion-2-base"""
UpperCamelCase : Union[str, Any] = DDIMScheduler.from_pretrained(_A , subfolder="""scheduler""" )
UpperCamelCase : Union[str, Any] = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCamelCase : List[Any] = self.get_inputs()
UpperCamelCase : Tuple = pipe(**_A ).images
UpperCamelCase : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
UpperCamelCase : Optional[Any] = np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=_A )
UpperCamelCase : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCamelCase : Any = self.get_inputs()
UpperCamelCase : str = pipe(**_A ).images
UpperCamelCase : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
UpperCamelCase : Union[str, Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _a ( self ):
'''simple docstring'''
        number_of_steps = 0
        def callback_fn(step , timestep , latents ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 6_4, 2_5_6)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 6_4, 2_5_6)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        callback_fn.has_been_called = False
UpperCamelCase : Union[str, Any] = """stabilityai/stable-diffusion-2-base"""
UpperCamelCase : Dict = DDIMScheduler.from_pretrained(_A , subfolder="""scheduler""" )
UpperCamelCase : Optional[Any] = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A )
UpperCamelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCamelCase : Tuple = self.get_inputs()
pipe(**_A , callback=_A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase : Optional[Any] = """stabilityai/stable-diffusion-2-base"""
UpperCamelCase : Any = DDIMScheduler.from_pretrained(_A , subfolder="""scheduler""" )
UpperCamelCase : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A )
UpperCamelCase : Dict = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCamelCase : int = self.get_inputs()
UpperCamelCase : List[str] = pipe(**_A )
UpperCamelCase : Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 1_0**9
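# A hedged end-to-end sketch of what the slow tests above exercise (requires a GPU and the
# 'stabilityai/stable-diffusion-2-base' weights; an illustration, not part of the test suite):
# pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#     'stabilityai/stable-diffusion-2-base',
#     scheduler=DDIMScheduler.from_pretrained('stabilityai/stable-diffusion-2-base', subfolder='scheduler'),
#     safety_checker=None,
# ).to('cuda')
# image = pipe('a photo of the dolomites').images[0]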
| 102 |
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__( self , value: int | None = None ) -> None:
        self.value = value
        self.parent: Node | None = None # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self :Tuple ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class BinarySearchTree:
    def __init__( self , root: Node | None = None ) -> None:
        self.root = root
def __str__( self :int ) -> str:
"""simple docstring"""
return str(self.root )
    def __reassign_nodes( self , node: Node , new_children: Node | None ) -> None:
        if new_children is not None: # reset its kids
            new_children.parent = node.parent
        if node.parent is not None: # reset its parent
            if self.is_right(node ): # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right( self , node: Node ) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty( self ) -> bool:
        return self.root is None
    def __insert( self , value ) -> None:
        new_node = Node(value ) # create a new Node
        if self.empty(): # if Tree is empty
            self.root = new_node # set its root
        else: # Tree is not empty
            parent_node = self.root # from root
            if parent_node is None:
                return
            while True: # While we don't get to a leaf
                if value < parent_node.value: # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert( self , *values ) -> None:
        for value in values:
            self.__insert(value )
    def search( self , value ) -> Node | None:
        if self.empty():
            raise IndexError('''Warning: Tree is empty! please use another.''' )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max( self , node: Node | None = None ) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min( self , node: Node | None = None ) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove( self , value: int ) -> None:
        node = self.search(value ) # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None: # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None: # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None: # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left ) # Gets the max value of the left branch
                self.remove(tmp_node.value ) # type: ignore
                node.value = (
                    tmp_node.value # type: ignore
                ) # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse( self , node: Node | None ) -> Iterable:
        if node is not None:
            yield node # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree( self , traversal_function=None ) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder( self , arr: list , node: Node | None ) -> None:
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )
    def find_kth_smallest( self , k: int , node: Node ) -> int:
        arr: list[int] = []
        self.inorder(arr , node ) # append all values to list using inorder traversal
        return arr[k - 1]
def postorder( curr_node: Node | None ) ->list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def binary_search_tree_example() ->None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
    if t.search(6 ) is not None:
        print('''The value 6 exists''' )
    else:
        print('''The value 6 doesn\'t exist''' )
    if t.search(-1 ) is not None:
        print('''The value -1 exists''' )
    else:
        print('''The value -1 doesn\'t exist''' )
    if not t.empty():
        print('''Max Value: ''' , t.get_max().value ) # type: ignore
        print('''Min Value: ''' , t.get_min().value ) # type: ignore
    for i in testlist:
        t.remove(i )
    print(t )
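# A quick hedged usage of find_kth_smallest defined above (illustration only):
example_tree = BinarySearchTree()
example_tree.insert(8, 3, 6, 1, 10)
assert example_tree.find_kth_smallest(2, example_tree.root) == 3  # in-order: 1, 3, 6, 8, 10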
if __name__ == "__main__":
import doctest
    doctest.testmod(verbose=True)
 | 201 | 0 |
'''simple docstring'''
__all__ = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
 | 701 |
'''simple docstring'''
def equation( x ) ->float:
    return 10 - x * x
def bisection( a: float , b: float ) ->float:
    # Bolzano's theorem: check that a sign change (hence a root) exists between a and b
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("""Wrong space!""" )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 603 | 0 |
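A quick sanity sketch for the bisection routine above: the positive root of 10 - x * x is sqrt(10), and the 0.01 bound mirrors the loop's stopping tolerance.

# Sketch: the returned endpoint of the final bracket is within the loop tolerance.
import math

root = bisection(0, 6)
assert abs(root - math.sqrt(10)) < 0.01, root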
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the original checkpoint's weights into the transformers design."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 643 |
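A minimal invocation sketch for the converter above, calling the function directly rather than through argparse; the checkpoint name comes from the script's own help text, while the output directory is a hypothetical choice.

# Sketch: direct call equivalent to the CLI usage (output path is hypothetical).
convert_roberta_prelayernorm_checkpoint_to_pytorch(
    checkpoint_repo="andreasmadsen/efficient_mlm_m0.40",  # example from the --checkpoint-repo help text
    pytorch_dump_folder_path="./roberta-prelayernorm-dump",  # hypothetical output directory
)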
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)

        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
| 643 | 1 |
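A driving sketch for the Nagel-Schreckenberg simulator above; the parameter values here are arbitrary picks, not ones taken from the source.

# Sketch: 100-cell ring road, a car every 5 cells starting at speed 1,
# 10 update steps with a 10% random-braking probability.
highway = construct_highway(100, frequency=5, initial_speed=1)
history = simulate(highway, number_of_update=10, probability=0.1, max_speed=5)
print(len(history))  # 11 snapshots: the initial state plus one per update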
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Clean up one modality's model list: deduplicate entries and sort them by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    """Check, and optionally fix, the model part of the documentation table of content."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 693 |
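A small behavioral sketch of clean_model_doc_toc above; the toy entries are made up for illustration.

# Sketch: duplicates by "local" collapse to one entry, and output is title-sorted.
toc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},  # duplicate of the first entry
]
print(clean_model_doc_toc(toc))
# -> [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]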
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b with the double-and-add (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Multiply a and b modulo c, reducing intermediate sums as it goes."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
| 693 | 1 |
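A quick sketch exercising both multipliers above.

# Sketch: 5 * 7 by repeated doubling, plain and mod 4.
assert binary_multiply(5, 7) == 35
assert binary_mod_multiply(5, 7, 4) == 35 % 4  # == 3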
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 8 |
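With the lazy module registered above, the heavy framework imports only run when an attribute is first accessed; a short usage sketch (which model classes resolve depends on the installed backends).

# Sketch: attribute access on the package triggers the deferred import.
from transformers import RoFormerConfig, RoFormerTokenizer

config = RoFormerConfig()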
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into lower and upper triangles."""
    # Ensure that table is a square array
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 146 | 0 |
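A verification sketch for the decomposition above; the 3x3 matrix is an arbitrary example chosen so that all pivots are nonzero.

# Sketch: the product of the factors must reproduce the input.
import numpy as np

matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(matrix)
assert np.allclose(lower @ upper, matrix)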
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to make sure the other processes observe the padding
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs only on two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs only on two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
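The script above only exercises the collectives when launched with several processes; a launch sketch, where the script filename is hypothetical.

# Sketch: run under the accelerate launcher so PartialState() sees two processes.
#   accelerate launch --num_processes 2 test_ops.py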
| 636 | import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, compile wrappers)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk: via xm.save on TPU, otherwise only on the local main process."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Context manager that temporarily sets upper-cased environment variables."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a readable name for an object, falling back to its class or str()."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`, returning `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a local port is already bound; defaults to 29500."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 636 | 1 |
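A small sketch of patch_environment() from the utilities above; it assumes MASTER_PORT is not already set in the surrounding environment.

# Sketch: keys are upper-cased on entry and removed again on exit.
import os

with patch_environment(master_port="29501"):
    assert os.environ["MASTER_PORT"] == "29501"
assert "MASTER_PORT" not in os.environ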
"""Distribute coins in a binary tree so that every node ends up holding exactly one coin."""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves needed to give every node one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        """Count the number of nodes in the tree."""
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        """Count the total number of coins in the tree."""
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 210 |
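A worked sketch for distribute_coins above, using the classic three-node example: three coins on the left child, none elsewhere.

# Sketch: two moves push coins from the left child to the root, one move
# pushes a coin from the root down to the right child.
root = TreeNode(0, TreeNode(3), TreeNode(0))
assert distribute_coins(root) == 3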
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Any = LDMTextToImagePipeline
__magic_name__ : Optional[int] = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
__magic_name__ : str = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
__magic_name__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
__magic_name__ : Optional[Any] = False
def a__( self : List[Any] )-> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase = CLIPTextModel(lowerCAmelCase )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vqvae''': vae,
'''bert''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def a__( self : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple=0 )-> str:
"""simple docstring"""
if str(lowerCAmelCase ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def a__( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = LDMTextToImagePipeline(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
UpperCAmelCase = pipe(**lowerCAmelCase ).images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
UpperCAmelCase = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Tuple )-> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__( self : str , lowerCAmelCase : Dict , lowerCAmelCase : List[Any]=torch.floataa , lowerCAmelCase : Optional[int]=0 )-> str:
"""simple docstring"""
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
UpperCAmelCase = np.random.RandomState(lowerCAmelCase ).standard_normal((1, 4, 32, 32) )
UpperCAmelCase = torch.from_numpy(lowerCAmelCase ).to(device=lowerCAmelCase , dtype=lowerCAmelCase )
UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def a__( self : Tuple )-> Any:
"""simple docstring"""
UpperCAmelCase = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = self.get_inputs(lowerCAmelCase )
UpperCAmelCase = pipe(**lowerCAmelCase ).images
UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] )
UpperCAmelCase = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Any )-> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__( self : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any]=torch.floataa , lowerCAmelCase : List[Any]=0 )-> Tuple:
"""simple docstring"""
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
UpperCAmelCase = np.random.RandomState(lowerCAmelCase ).standard_normal((1, 4, 32, 32) )
UpperCAmelCase = torch.from_numpy(lowerCAmelCase ).to(device=lowerCAmelCase , dtype=lowerCAmelCase )
UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 50,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def a__( self : Any )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = self.get_inputs(lowerCAmelCase )
UpperCAmelCase = pipe(**lowerCAmelCase ).images[0]
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' )
UpperCAmelCase = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 210 | 1 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCamelCase:
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int=1_3 , SCREAMING_SNAKE_CASE__ : str=3_0 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : str=5 , SCREAMING_SNAKE_CASE__ : Any=4 , SCREAMING_SNAKE_CASE__ : str=3_7 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=1_0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Any=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.6 , SCREAMING_SNAKE_CASE__ : Tuple=None , ):
'''simple docstring'''
__a : List[Any] = parent
__a : int = batch_size
__a : str = image_size
__a : Union[str, Any] = patch_size
__a : Any = num_channels
__a : Optional[int] = is_training
__a : List[Any] = use_labels
__a : Union[str, Any] = hidden_size
__a : Any = num_hidden_layers
__a : Dict = num_attention_heads
__a : Union[str, Any] = intermediate_size
__a : Dict = hidden_act
__a : Tuple = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : str = type_sequence_label_size
__a : str = initializer_range
__a : Union[str, Any] = mask_ratio
__a : int = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__a : List[str] = (image_size // patch_size) ** 2
__a : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Optional[int] = None
if self.use_labels:
__a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
__a : Dict = ViTMAEModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__a : List[Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
__a : List[str] = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__a : Dict = model(SCREAMING_SNAKE_CASE__ )
__a : List[str] = (self.image_size // self.patch_size) ** 2
__a : Union[str, Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__a : List[str] = 1
__a : str = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__a : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a : Optional[Any] = model(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : List[str] = self.prepare_config_and_inputs()
__a , __a , __a : Union[str, Any] = config_and_inputs
__a : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : List[Any] = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Any = False
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : Union[str, Any] = ViTMAEModelTester(self )
__a : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
pass
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = model_class(SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : int = [*signature.parameters.keys()]
__a : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
np.random.seed(2 )
__a : Optional[int] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
__a : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__a : List[str] = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__a : Any = pt_noise
super().check_pt_tf_models(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Tuple = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__a : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
__a : Dict = outputs[0].cpu().numpy()
__a : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
__a : Any = model_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__a : Dict = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# Make sure we don't have nans
__a : List[str] = after_outputs[0].cpu().numpy()
__a : Optional[Any] = 0
__a : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
pass
@slow
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Optional[Any] = ViTMAEModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase__ ( ):
__a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _UpperCamelCase( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
np.random.seed(2 )
__a : Optional[int] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(SCREAMING_SNAKE_CASE__ )
__a : Any = self.default_image_processor
__a : Optional[Any] = prepare_img()
__a : Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__a : Tuple = ViTMAEConfig()
__a : Optional[int] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__a : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
__a : str = model(**SCREAMING_SNAKE_CASE__ , noise=torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(device=SCREAMING_SNAKE_CASE__ ) )
# verify the logits
__a : List[Any] = torch.Size((1, 1_9_6, 7_6_8) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(SCREAMING_SNAKE_CASE__ ) , atol=1e-4 ) )
| 577 |
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score start and end tokens of the query against the support spans."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 577 | 1 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """Send a message to Slack through an incoming-webhook URL."""
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>') | 201 |
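A filled-in sketch of the call above; both arguments are placeholders, and real webhook URLs come from Slack's incoming-webhook setup page.

# Sketch: placeholder values only, not a working webhook.
send_slack_message(
    "Nightly build finished",
    "https://hooks.slack.com/services/T000/B000/XXXXXXXX",
)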
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowerCamelCase ( __lowerCamelCase ):
def __init__( self :int , lowercase :Optional[Any] , lowercase :Optional[int]=1_3 , lowercase :Any=7 , lowercase :Tuple=True , lowercase :Optional[int]=True , lowercase :Any=False , lowercase :Any=True , lowercase :Dict=9_9 , lowercase :Dict=3_2 , lowercase :Any=5 , lowercase :Optional[Any]=4 , lowercase :List[str]=6_4 , lowercase :Optional[int]="gelu" , lowercase :int=0.1 , lowercase :str=0.1 , lowercase :List[str]=5_1_2 , lowercase :int=1_6 , lowercase :Any=2 , lowercase :Union[str, Any]=0.02 , lowercase :Optional[int]=3 , lowercase :Optional[Any]=4 , lowercase :Tuple=None , lowercase :int=2 , lowercase :Tuple=2 , lowercase :List[Any]=2 , lowercase :Optional[int]=2 , lowercase :Tuple=4 , lowercase :int=1 , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = q_groups
SCREAMING_SNAKE_CASE = k_groups
SCREAMING_SNAKE_CASE = v_groups
SCREAMING_SNAKE_CASE = post_attention_groups
SCREAMING_SNAKE_CASE = intermediate_groups
SCREAMING_SNAKE_CASE = output_groups
def snake_case__ ( self :str ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self :str ) -> Dict:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def snake_case__ ( self :Optional[Any] , lowercase :Optional[Any] , lowercase :int , lowercase :Any , lowercase :List[str] , lowercase :Optional[Any] , lowercase :List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertModel(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase , lowercase )
SCREAMING_SNAKE_CASE = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self :Dict , lowercase :Dict , lowercase :List[Any] , lowercase :str , lowercase :Union[str, Any] , lowercase :Dict , lowercase :List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertForMaskedLM(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self :List[str] , lowercase :Optional[Any] , lowercase :Optional[int] , lowercase :str , lowercase :int , lowercase :Optional[Any] , lowercase :int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(
lowercase , attention_mask=lowercase , start_positions=lowercase , end_positions=lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self :Any , lowercase :Optional[Any] , lowercase :List[str] , lowercase :int , lowercase :Any , lowercase :Optional[int] , lowercase :List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SqueezeBertForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self :str , lowercase :List[Any] , lowercase :List[str] , lowercase :Optional[int] , lowercase :Tuple , lowercase :Tuple , lowercase :str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SqueezeBertForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self :int , lowercase :List[str] , lowercase :List[Any] , lowercase :Tuple , lowercase :str , lowercase :Optional[Any] , lowercase :Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = SqueezeBertForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
lowercase , attention_mask=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self :List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = config_and_inputs
SCREAMING_SNAKE_CASE = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
UpperCamelCase_ : int = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase_ : int = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : int = True
UpperCamelCase_ : List[Any] = False
def snake_case__ ( self :Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowercase , dim=3_7 )
def snake_case__ ( self :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self :Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowercase )
def snake_case__ ( self :Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowercase )
def snake_case__ ( self :List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowercase )
def snake_case__ ( self :Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowercase )
def snake_case__ ( self :Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowercase )
def snake_case__ ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowercase )
@slow
def snake_case__ ( self :Dict ) -> str:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SqueezeBertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def snake_case__ ( self :Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
SCREAMING_SNAKE_CASE = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
SCREAMING_SNAKE_CASE = model(lowercase )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 3) )
self.assertEqual(output.shape , lowercase )
SCREAMING_SNAKE_CASE = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-4 ) ) | 201 | 1 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort the list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
| 144 |
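A quick sketch of the sorter above; exchange sort performs O(n^2) comparisons, so it suits short lists.

# Sketch: basic and edge-case checks.
assert exchange_sort([5, 1, 4, 2]) == [1, 2, 4, 5]
assert exchange_sort([]) == []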
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCamelCase__ ( UpperCAmelCase ):
def UpperCAmelCase_ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : List[str] = SMALL_MODEL_IDENTIFIER
lowerCamelCase_ : str = 'pt'
lowerCamelCase_ : List[Any] = 'tf'
def UpperCAmelCase_ (self : List[str] , _snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : Tuple = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_snake_case )
def UpperCAmelCase_ (self : Union[str, Any] , _snake_case : Optional[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=_snake_case )
model_tf.save_pretrained(_snake_case )
def UpperCAmelCase_ (self : Optional[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ : List[Any] = 'mock_framework'
# Framework provided - return whatever the user provides
lowerCamelCase_ : str = FeaturesManager.determine_framework(self.test_model , _snake_case )
self.assertEqual(_snake_case , _snake_case )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_snake_case )
lowerCamelCase_ : Optional[Any] = FeaturesManager.determine_framework(_snake_case , _snake_case )
self.assertEqual(_snake_case , _snake_case )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_snake_case )
lowerCamelCase_ : Optional[Any] = FeaturesManager.determine_framework(_snake_case , _snake_case )
self.assertEqual(_snake_case , _snake_case )
def UpperCAmelCase_ (self : Tuple ) -> int:
"""simple docstring"""
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_snake_case )
lowerCamelCase_ : str = FeaturesManager.determine_framework(_snake_case )
self.assertEqual(_snake_case , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_snake_case )
lowerCamelCase_ : List[str] = FeaturesManager.determine_framework(_snake_case )
self.assertEqual(_snake_case , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_snake_case ):
lowerCamelCase_ : int = FeaturesManager.determine_framework(_snake_case )
def UpperCAmelCase_ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = MagicMock(return_value=_snake_case )
with patch('transformers.onnx.features.is_tf_available' , _snake_case ):
lowerCamelCase_ : List[str] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_snake_case , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowerCamelCase_ : str = MagicMock(return_value=_snake_case )
with patch('transformers.onnx.features.is_torch_available' , _snake_case ):
lowerCamelCase_ : Any = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_snake_case , self.framework_tf )
# Both in environment -> use PyTorch
lowerCamelCase_ : Optional[Any] = MagicMock(return_value=_snake_case )
lowerCamelCase_ : Optional[Any] = MagicMock(return_value=_snake_case )
with patch('transformers.onnx.features.is_tf_available' , _snake_case ), patch(
'transformers.onnx.features.is_torch_available' , _snake_case ):
lowerCamelCase_ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_snake_case , self.framework_pt )
# Both not in environment -> raise error
lowerCamelCase_ : Union[str, Any] = MagicMock(return_value=_snake_case )
lowerCamelCase_ : Optional[int] = MagicMock(return_value=_snake_case )
with patch('transformers.onnx.features.is_tf_available' , _snake_case ), patch(
'transformers.onnx.features.is_torch_available' , _snake_case ):
with self.assertRaises(_snake_case ):
lowerCamelCase_ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
| 144 | 1 |