| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 – 53.2k chars) | int64 (0 – 721) | string (91 – 41.9k chars) | int64 (0 – 699) | int64 (0 – 1) |
def heaps(arr: list) -> list:
    """Iterative Heap's algorithm: return all permutations of `arr` as tuples."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
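# A minimal non-interactive sketch of the expected behavior (illustrative
# values, not part of the original script): Heap's algorithm yields all n!
# permutations of the input exactly once.
#
#     >>> sorted(heaps([1, 2]))
#     [(1, 2), (2, 1)]
#     >>> len(heaps([1, 2, 3]))  # 3! = 6 permutations
#     6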
# ---- dataset-viewer row metadata: 64 ----
def solution(power: int = 1000) -> int:
    """Project Euler problem 16: return the sum of the digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for digit in list_num:
        sum_of_num += int(digit)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
# ---- dataset-viewer row metadata: 143 | 0 ----
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple

import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs

from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    """Registers forward hooks on every leaf module and records the ones that ran."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # keep only the modules that actually hold learnable parameters
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copies weights from `src` to `dest` by aligning the traced operations of both modules."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """Mimics the vissl wrapper around a classy_vision RegNet, without needing a config file."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """Dictionary with fallback logic: unknown names are resolved to a timm model factory."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val


class NameToOurModelFuncMap(dict):
    """Dictionary returning the correct Hugging Face RegNet class for a checkpoint name."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val


def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just
    # check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        "regnet-x-002": ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x"
        ),
        "regnet-x-004": ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x"
        ),
        "regnet-x-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x"
        ),
        "regnet-x-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x"
        ),
        "regnet-x-016": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x"
        ),
        "regnet-x-032": ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x"
        ),
        "regnet-x-040": ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x"
        ),
        "regnet-x-064": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x"
        ),
        "regnet-x-080": ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x"
        ),
        "regnet-x-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x"
        ),
        "regnet-x-160": ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x"
        ),
        "regnet-x-320": ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x"
        ),
        # y variant
        "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        "regnet-y-004": ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8
        ),
        "regnet-y-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16
        ),
        "regnet-y-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16
        ),
        "regnet-y-016": ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24
        ),
        "regnet-y-032": ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24
        ),
        "regnet-y-040": ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64
        ),
        "regnet-y-064": ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72
        ),
        "regnet-y-080": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56
        ),
        "regnet-y-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112
        ),
        "regnet-y-160": ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112
        ),
        "regnet-y-320": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer": RegNetConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer": RegNetConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
        # finetuned on imagenet
        "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328
        ),
        "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
    }
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )

    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )

    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )

    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )

    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )

    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )

    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
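# Example invocation (a sketch; the script name and paths are illustrative):
#
#     python convert_regnet_to_pytorch.py \
#         --model_name regnet-y-040 \
#         --pytorch_dump_folder_path ./converted_regnet
#
# Note that `type=bool` on --push_to_hub means any non-empty string parses as
# True, so omit the flag (or edit the default) rather than passing "False".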
# ---- dataset-viewer row metadata: 577 ----
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Project Euler problem 71: return the numerator of the largest fraction
    below `numerator / denominator` whose denominator does not exceed `limit`."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1000000))
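# Behavior sketch: with the default limit, the fraction immediately to the
# left of 3/7 is 428570/999997, so the function returns its numerator.
#
#     >>> solution(3, 7, 8)       # best candidate below 3/7 is 2/5
#     2
#     >>> solution(3, 7, 1000000)
#     428570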
# ---- dataset-viewer row metadata: 577 | 1 ----
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
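# Example invocation (illustrative file names, not from the original script):
#
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin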
# ---- dataset-viewer row metadata: 652 ----
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
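# Context sketch (not part of the test file): the scheduler under test can be
# built directly from the same keyword arguments as the config dict above.
#
#     from diffusers import UnCLIPScheduler
#     scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
#     scheduler.set_timesteps(25)  # fewer inference steps, as in the skip-timesteps test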
# ---- dataset-viewer row metadata: 652 | 1 ----
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
lowerCAmelCase__ = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
lowerCAmelCase__ = logging.WARNING
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[Any] = os.getenv("DATASETS_VERBOSITY" , SCREAMING_SNAKE_CASE )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def a__ ( ):
'''simple docstring'''
return __name__.split("." )[0]
def a__ ( ):
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[Any] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def a__ ( SCREAMING_SNAKE_CASE : Optional[str] = None ):
'''simple docstring'''
if name is None:
lowerCAmelCase : Optional[int] = _get_library_name()
return logging.getLogger(SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
return _get_library_root_logger().getEffectiveLevel()
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_get_library_root_logger().setLevel(SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
return set_verbosity(SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
return set_verbosity(SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
return set_verbosity(SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
return set_verbosity(SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Dict = False
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , *snake_case__ , **snake_case__ ): # pylint: disable=unused-argument
"""simple docstring"""
lowerCAmelCase : str = args[0] if args else None
def __iter__( self ):
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self , snake_case__ ):
"""simple docstring"""
def empty_fn(*snake_case__ , **snake_case__ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
"""simple docstring"""
return self
def __exit__( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
return
lowerCAmelCase__ = True
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __call__( self , *snake_case__ , snake_case__=False , **snake_case__ ):
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*snake_case__ , **snake_case__ )
else:
return EmptyTqdm(*snake_case__ , **snake_case__ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*snake_case__ , **snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
lowerCAmelCase__ = _tqdm_cls()
def a__ ( ):
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def a__ ( ):
'''simple docstring'''
global _tqdm_active
lowerCAmelCase : List[str] = True
def a__ ( ):
'''simple docstring'''
global _tqdm_active
lowerCAmelCase : int = False
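# Usage sketch from client code (module path assumed from the relative imports
# in the surrounding files, i.e. the `datasets` library):
#
#     from datasets.utils.logging import get_logger, set_verbosity_info
#
#     set_verbosity_info()
#     logger = get_logger(__name__)
#     logger.info("INFO-level messages from `datasets` are now visible")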
# ---- dataset-viewer row metadata: 681 ----
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
# ---- dataset-viewer row metadata: 681 | 1 ----
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
def __init__( self : List[str] ,A : List[Any] ,A : List[str]=13 ,A : Any=32 ,A : List[str]=3 ,A : Optional[int]=4 ,A : Optional[int]=[10, 20, 30, 40] ,A : str=[2, 2, 3, 2] ,A : Optional[Any]=True ,A : Dict=True ,A : Tuple=37 ,A : List[str]="gelu" ,A : Optional[int]=10 ,A : List[Any]=0.0_2 ,A : Optional[int]=["stage2", "stage3", "stage4"] ,A : List[Any]=[2, 3, 4] ,A : List[Any]=None ,):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Union[str, Any] = image_size
UpperCAmelCase__ : Any = num_channels
UpperCAmelCase__ : Optional[int] = num_stages
UpperCAmelCase__ : str = hidden_sizes
UpperCAmelCase__ : List[Any] = depths
UpperCAmelCase__ : str = is_training
UpperCAmelCase__ : Dict = use_labels
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : List[Any] = hidden_act
UpperCAmelCase__ : Optional[Any] = num_labels
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : List[Any] = out_features
UpperCAmelCase__ : Optional[Any] = out_indices
UpperCAmelCase__ : Any = scope
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Tuple = None
if self.use_labels:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] ,self.num_labels )
UpperCAmelCase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : int ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=A ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def __lowercase ( self : str ,A : List[Any] ,A : Union[str, Any] ,A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = ConvNextVaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowercase ( self : Union[str, Any] ,A : Union[str, Any] ,A : Optional[Any] ,A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ConvNextVaForImageClassification(A )
model.to(A )
model.eval()
UpperCAmelCase__ : Optional[int] = model(A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : int ,A : Optional[int] ,A : Optional[int] ,A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase__ : Tuple = model(A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : str = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase__ : str = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = config_and_inputs
UpperCAmelCase__ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = config_and_inputs
UpperCAmelCase__ : Dict = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class __lowercase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
snake_case_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ConvNextVaModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(self ,config_class=A ,has_text_modality=A ,hidden_size=37 )
def __lowercase ( self : List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : List[str] ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def __lowercase ( self : str ):
'''simple docstring'''
pass
def __lowercase ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase__ : int = True
if model_class.__name__ in [
*get_values(A ),
*get_values(A ),
]:
continue
UpperCAmelCase__ : Tuple = model_class(A )
model.to(A )
model.train()
UpperCAmelCase__ : List[Any] = self._prepare_for_class(A ,A ,return_labels=A )
UpperCAmelCase__ : Optional[int] = model(**A ).loss
loss.backward()
def __lowercase ( self : Tuple ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase__ : int = False
UpperCAmelCase__ : List[Any] = True
if (
model_class.__name__
in [*get_values(A ), *get_values(A )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase__ : Dict = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase__ : Tuple = self._prepare_for_class(A ,A ,return_labels=A )
UpperCAmelCase__ : Optional[Any] = model(**A ).loss
loss.backward()
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(A )
UpperCAmelCase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Optional[Any] = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,A )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def __lowercase ( self : Any ):
'''simple docstring'''
def check_hidden_states_output(A : Optional[Any] ,A : Union[str, Any] ,A : str ):
UpperCAmelCase__ : List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : int = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ : List[str] = self.model_tester.num_stages
self.assertEqual(len(A ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = True
check_hidden_states_output(A ,A ,A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Tuple = True
check_hidden_states_output(A ,A ,A )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def __lowercase ( self : int ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(A )
UpperCAmelCase__ : Any = self.default_image_processor
UpperCAmelCase__ : str = prepare_img()
UpperCAmelCase__ : List[Any] = preprocessor(images=A ,return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**A )
# verify the logits
UpperCAmelCase__ : List[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,A )
UpperCAmelCase__ : Optional[Any] = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A ,atol=1e-4 ) )
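# These tests are normally collected by pytest; an illustrative invocation
# (the path is an assumption about the repository layout):
#
#     pytest tests/models/convnextv2/test_modeling_convnextv2.py -k "hidden_states"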
# ---- dataset-viewer row metadata: 65 ----
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )

        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )

        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
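# Behavior sketch for the URL-to-dummy-path mapping above (illustrative,
# hypothetical values): each URL is rewritten to a path inside the dummy data
# folder, named after the URL's quoted last segment.
#
#     dm = MockDownloadManager("my_dataset", None, "1.0.0", use_local_dummy_data=True)
#     dm.create_dummy_data_single("dummy/1.0.0/dummy_data", "https://host/files/train.csv?x=1")
#     # -> "dummy/1.0.0/dummy_data/train.csv%3Fx%3D1" (when that file exists locally)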
# ---- dataset-viewer row metadata: 358 | 0 ----
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : List[Any] = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
__magic_name__ : List[Any] = """wavlm"""
def __init__( self : List[str] , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Dict=768 , UpperCamelCase__ : Any=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : int=3072 , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[Any]=0.02 , UpperCamelCase__ : Any=1E-5 , UpperCamelCase__ : Optional[Any]="group" , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : str=(512, 512, 512, 512, 512, 512, 512) , UpperCamelCase__ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : Any=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : str=128 , UpperCamelCase__ : str=16 , UpperCamelCase__ : Tuple=320 , UpperCamelCase__ : Union[str, Any]=800 , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[Any]=0.05 , UpperCamelCase__ : str=10 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : List[Any]=10 , UpperCamelCase__ : List[Any]=320 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : int=100 , UpperCamelCase__ : List[str]=256 , UpperCamelCase__ : Optional[int]=256 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Any="mean" , UpperCamelCase__ : Any=False , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Tuple=256 , UpperCamelCase__ : Any=(512, 512, 512, 512, 1500) , UpperCamelCase__ : int=(5, 3, 3, 1, 1) , UpperCamelCase__ : Optional[int]=(1, 2, 3, 1, 1) , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : List[Any]=80 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : List[str]=None , **UpperCamelCase__ : Dict , ):
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
A__ : int =hidden_size
A__ : Any =feat_extract_norm
A__ : Optional[Any] =feat_extract_activation
A__ : List[Any] =list(UpperCamelCase__ )
A__ : Optional[int] =list(UpperCamelCase__ )
A__ : int =list(UpperCamelCase__ )
A__ : int =conv_bias
A__ : Union[str, Any] =num_buckets
A__ : int =max_bucket_distance
A__ : Optional[int] =num_conv_pos_embeddings
A__ : Union[str, Any] =num_conv_pos_embedding_groups
A__ : Any =len(self.conv_dim )
A__ : Dict =num_hidden_layers
A__ : str =intermediate_size
A__ : str =hidden_act
A__ : Dict =num_attention_heads
A__ : Dict =hidden_dropout
A__ : Dict =attention_dropout
A__ : Dict =activation_dropout
A__ : List[Any] =feat_proj_dropout
A__ : Optional[Any] =final_dropout
A__ : int =layerdrop
A__ : Union[str, Any] =layer_norm_eps
A__ : List[str] =initializer_range
A__ : Optional[Any] =num_ctc_classes
A__ : Tuple =vocab_size
A__ : str =do_stable_layer_norm
A__ : List[Any] =use_weighted_layer_sum
A__ : Optional[int] =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ : int =apply_spec_augment
A__ : Tuple =mask_time_prob
A__ : Optional[Any] =mask_time_length
A__ : List[Any] =mask_time_min_masks
A__ : Union[str, Any] =mask_feature_prob
A__ : Union[str, Any] =mask_feature_length
# parameters for pretraining with codevector quantized representations
A__ : str =num_codevectors_per_group
A__ : str =num_codevector_groups
A__ : Optional[int] =contrastive_logits_temperature
A__ : Any =num_negatives
A__ : Union[str, Any] =codevector_dim
A__ : Union[str, Any] =proj_codevector_dim
A__ : List[Any] =diversity_loss_weight
# ctc loss
A__ : List[Any] =ctc_loss_reduction
A__ : Optional[int] =ctc_zero_infinity
# adapter
A__ : int =add_adapter
A__ : Dict =adapter_kernel_size
A__ : Any =adapter_stride
A__ : Union[str, Any] =num_adapter_layers
A__ : List[Any] =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A__ : List[Any] =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A__ : Optional[Any] =list(UpperCamelCase__ )
A__ : str =list(UpperCamelCase__ )
A__ : Tuple =list(UpperCamelCase__ )
A__ : str =xvector_output_dim
@property
def _UpperCAmelCase ( self : Tuple ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
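# Minimal usage sketch (defaults are the values shown above; the arguments
# here are illustrative):
#
#     config = WavLMConfig(vocab_size=32, hidden_size=768)
#     config.inputs_to_logits_ratio  # product of the conv strides: 5*2*2*2*2*2*2 = 320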
# ---- dataset-viewer row metadata: 595 ----
"""simple docstring"""
from collections import defaultdict
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
A__ : Union[str, Any] =1
A__ : int =True
for v in tree[start]:
if v not in visited:
ret += dfs(UpperCamelCase )
if ret % 2 == 0:
cuts.append(UpperCamelCase )
return ret
def lowercase ( ):
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
__A , __A : List[str] = 10, 9
__A : Dict = defaultdict(list)
__A : dict[int, bool] = {}
__A : list[int] = []
__A : List[str] = 0
__A : str = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
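# What the demo computes (a brief sketch): for the 10-node tree above, dfs()
# records every vertex whose subtree has an even number of nodes; each such
# vertex marks an edge that can be cut to leave even-sized components. The
# root is always recorded, hence the final `len(cuts) - 1`, which prints 2
# for this input (cutting edges (1, 3) and (1, 6)).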
# ---- dataset-viewer row metadata: 595 | 1 ----
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ : List[str] = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'roberta-base': 5_12,
'roberta-large': 5_12,
'roberta-large-mnli': 5_12,
'distilroberta-base': 5_12,
'roberta-base-openai-detector': 5_12,
'roberta-large-openai-detector': 5_12,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoBERTa tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = RobertaTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
                 bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>",
                 pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
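# Minimal usage sketch (not part of the original module; requires Hub access, and
# the ids shown come from the public roberta-base vocab -- verify locally):
#
#     from transformers import RobertaTokenizerFast
#     tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#     tok("Hello world")["input_ids"]  # -> [0, 31414, 232, 2]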
| 98
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """Object detection pipeline: predicts bounding boxes of objects and their classes."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score": x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation
    def _get_bounding_box(self, box) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
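# Minimal usage sketch (not part of the original module; the model is downloaded on
# first call and exact scores/boxes depend on the default checkpoint):
#
#     from transformers import pipeline
#     detector = pipeline("object-detection")
#     detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]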
| 594
| 0
|
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists; the source
        vertex has to be defined upon initialization."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run a breadth first search from the source vertex, filling in the
        parent mapping as the tree is discovered."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source to ``target_vertex`` as a
        '->'-separated string; raise ValueError if no path exists."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
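# Expected behaviour with source vertex 'G' on the graph above:
#   G->C->A->B->D
#   G
#   ValueError: No path from vertex: G to vertex: Foo   (raised by the last call)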
| 510
|
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
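# Worked example: a 100 VA load at power factor 0.9 draws
#   real_power(100, 0.9)      # -> 90.0 W
#   reactive_power(100, 0.9)  # -> 100 * sqrt(1 - 0.81) ≈ 43.59 var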
| 510
| 1
|
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Find the minimum change from the given denominations and value."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
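# Worked example (greedy change-making with the canonical Indian denominations):
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]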
| 644
|
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """Handles arguments for zero-shot text classification by turning each
    candidate label into an NLI premise/hypothesis pair."""

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """NLI-based zero-shot classification pipeline."""

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
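# Minimal usage sketch (not part of the original module; assumes Hub access and a
# standard NLI checkpoint such as facebook/bart-large-mnli):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     classifier("one day I will see the world", candidate_labels=["travel", "cooking", "dancing"])
#     # -> {"sequence": ..., "labels": ["travel", ...], "scores": [...]}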
| 165
| 0
|
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."


if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 713
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    """InstructBLIP processor wrapping an image processor, a language-model tokenizer
    and a separate Q-Former tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overridden so the Q-Former tokenizer is saved in its own subfolder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overridden so the Q-Former tokenizer is loaded from its own subfolder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
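# Minimal usage sketch (not part of the original module; checkpoint name assumed
# from the public InstructBLIP releases):
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
#     inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")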
| 350
| 0
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 360
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """CLIPSeg processor wrapping a ViT image processor and a CLIP tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
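# Minimal usage sketch (not part of the original module; checkpoint name assumed
# from the public CLIPSeg release):
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat", "a remote"], images=[image, image], return_tensors="pt")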
| 536
| 0
|
'''simple docstring'''
from copy import deepcopy
class FenwickTree:
    """Binary indexed (Fenwick) tree supporting point updates and prefix sums."""

    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        """Build the tree from ``arr`` in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """Recover the underlying array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add ``value`` to the element at ``index`` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the element at ``index`` to ``value`` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Return the sum of the first ``right`` elements in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Return the sum over the half-open interval [left, right)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Return the largest index whose prefix sum does not exceed ``value``,
        or -1 (assumes non-negative elements)."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
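# Minimal usage sketch of the Fenwick tree above:
#   f = FenwickTree(arr=[1, 2, 3, 4, 5])
#   f.prefix(3)    # -> 6   (1 + 2 + 3)
#   f.add(1, 10)   # underlying array becomes [1, 12, 3, 4, 5]
#   f.query(1, 4)  # -> 19  (12 + 3 + 4)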
| 703
|
def find_min(arr: list[int]) -> int:
    """Return the minimum difference between the sums of two subsets of ``arr``."""
    n = len(arr)
    s = sum(arr)

    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
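# Worked example: find_min([1, 6, 11, 5]) -> 1
# (split {1, 5, 6} summing to 12 against {11}, a difference of 1)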
| 70
| 0
|
"""simple docstring"""
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Count the prize strings for the remaining ``days`` given the current
    absent/late state (Project Euler 191)."""
    # a prize string is forfeited on a second absence or a third consecutive late day
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of prize strings over ``days`` days."""
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
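# Known checks (Project Euler 191): over a 4-day period exactly 43 of the 81
# possible trinary strings earn a prize, and the 30-day answer is 1918080160.
#   solution(4)  # -> 43
#   solution()   # -> 1918080160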
| 646
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
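# Minimal usage sketch (not part of the original module):
#
#     config = DeformableDetrConfig(num_queries=100)
#     config.num_attention_heads  # -> 8, aliased to `encoder_attention_heads`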
| 646
| 1
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
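# Minimal usage sketch (not part of the original module; checkpoint name assumed
# from the public Speech2Text release):
#
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
#     labels = processor(text="a transcription", return_tensors="pt").input_ids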
| 714
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Download and cache the prompt for the given agent, or return a custom prompt unchanged."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
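# Minimal usage sketch (not part of the original module):
#
#     # fetch the default "run" template for an agent from the Hub
#     template = download_prompt(None, agent_name="MyAgent", mode="run")
#     # a custom prompt (anything containing whitespace) is returned unchanged
#     download_prompt("Answer the question: <<task>>", agent_name="MyAgent")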
| 245
| 0
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio prompt with Whisper
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
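# Minimal usage sketch (not part of the original file; loading as a diffusers
# community pipeline is an assumption -- model and component names may differ):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5",
#         custom_pipeline="speech_to_image_diffusion",
#         speech_model=speech_model,
#         speech_processor=speech_processor,
#     )
#     image = pipe(audio=waveform, sampling_rate=16_000).images[0]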
| 519
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                sa = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                pa = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, sa, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, pa, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = "This is a simple input"
        sa = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        pa = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_sa = tokenizer(sa, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_pa = tokenizer(pa, padding=True, truncate=True, return_tensors="np")
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = "This is a simple input"
        sa = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_sa = tokenizer(sa)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_sa = tokenizer.batch_decode(out_sa.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
@slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"
        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
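# --- Illustrative usage sketch (not part of the test suite) ---
# A minimal example of the `truncate_before_pattern` decode behaviour exercised in
# `test_truncation` above. Assumes network access to the "Salesforce/codegen-350M-mono"
# checkpoint on the Hugging Face Hub.
#
#   from transformers import CodeGenTokenizer
#   tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tok.encode("def f():\n    return 1\n\n\n\n# trailing comment")
#   print(tok.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))
#   # decoding stops before the first span matching one of the patterns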
| 519
| 1
|
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
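# Illustrative sketch (not used by the pipeline itself): `prepare_image` maps a PIL
# image to a (1, 3, h, w) float tensor scaled to [-1, 1].
def _example_prepare_image():
    dummy = Image.new("RGB", (64, 64), color=(255, 0, 0))  # solid red image
    tensor = prepare_image(dummy, w=64, h=64)
    # torch.Size([1, 3, 64, 64]); red channel maps to 1.0, empty channels to -1.0
    return tensor.shape, tensor.min().item(), tensor.max().item()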
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
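    # Worked example for get_timesteps (illustrative): with num_inference_steps=100 and
    # strength=0.2, init_timestep = 20 and t_start = 80, so only the last 20 scheduler
    # timesteps are run — the init image is noised to timesteps[80] and denoised from there.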
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor")
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 96
|
from ...configuration_utils import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}

class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64,
                 type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1,
                 pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
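
# Minimal usage sketch (illustrative only, not part of the library API): defaults can be
# overridden like any other PretrainedConfig subclass.
def _example_nezha_config():
    config = NezhaConfig(hidden_size=512, num_hidden_layers=8)
    return config.hidden_size, config.max_relative_position  # (512, 64)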
| 96
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
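# Illustrative note: with the `_LazyModule` indirection above, downstream code imports
# these symbols lazily, e.g. (assuming transformers is installed with torch + vision):
#   from transformers import OwlViTProcessor, OwlViTForObjectDetection
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
# The backing submodules are only materialized on first attribute access.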
| 521
|
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = 'mid_block.attentions.0.'
sd_mid_atn_prefix = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
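# Minimal sketch (illustrative): the conversion is purely key-based, so it can be
# checked on a toy state dict without loading a real UNet.
def _example_unet_key_rename():
    fake_sd = {"time_embedding.linear_1.weight": torch.zeros(1)}
    converted = convert_unet_state_dict(fake_sd)
    return list(converted.keys())  # ["time_embed.0.weight"]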
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
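# Illustrative check: a (320, 320) HF linear attention weight becomes a (320, 320, 1, 1)
# SD conv weight; the values are untouched, only the shape changes, e.g.
#   reshape_weight_for_sd(torch.ones(320, 320)).shape == torch.Size([320, 320, 1, 1])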
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'q': 0, 'k': 1, 'v': 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('.self_attn.q_proj.weight')
            or k.endswith('.self_attn.k_proj.weight')
            or k.endswith('.self_attn.v_proj.weight')
        ):
            k_pre = k[: -len('.q_proj.weight')]
            k_code = k[-len('q_proj.weight')]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith('.self_attn.q_proj.bias')
            or k.endswith('.self_attn.k_proj.bias')
            or k.endswith('.self_attn.v_proj.bias')
        ):
            k_pre = k[: -len('.q_proj.bias')]
            k_code = k[-len('q_proj.bias')]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_weight'] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_bias'] = torch.cat(tensors)
    return new_state_dict
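# Minimal sketch (illustrative): how `textenc_pattern.sub` maps an HF text-encoder key
# back to the OpenCLIP naming used inside SD checkpoints.
def _example_textenc_rename():
    key = "text_model.encoder.layers.0.layer_norm1.weight"
    return textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], key)
    # -> "resblocks.0.ln_1.weight"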
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
    )
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    text_enc_path = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='cpu')
    else:
        unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        unet_state_dict = torch.load(unet_path, map_location='cpu')
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='cpu')
    else:
        vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        vae_state_dict = torch.load(vae_path, map_location='cpu')
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='cpu')
    else:
        text_enc_path = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        text_enc_dict = torch.load(text_enc_path, map_location='cpu')
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
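# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.ckpt --half
# Pass --use_safetensors to write a .safetensors file instead of a pickled .ckpt.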
| 521
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 711
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1)
    os.makedirs(f'''{class_data_dir}/images''', exist_ok=True)
    if len(list(Path(f'''{class_data_dir}/images''').iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1,)
    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images', total=num_class_images)
    with open(f'''{class_data_dir}/caption.txt''', 'w') as fa, open(f'''{class_data_dir}/urls.txt''', 'w') as fa_urls, open(
        f'''{class_data_dir}/images.txt''', 'w') as fa_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f'''{class_data_dir}/images/{total}.jpg''', 'wb') as f:
                        f.write(img.content)
                    fa.write(images['caption'] + '\n')
                    fa_urls.write(images['url'] + '\n')
                    fa_paths.write(f'''{class_data_dir}/images/{total}.jpg''' + '\n')
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=True, type=str)
    parser.add_argument('--class_data_dir', help='path to save images', required=True, type=str)
    parser.add_argument('--num_class_images', help='number of images to download', default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
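# Example invocation (illustrative; the script name, prompt, and paths are placeholders):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./real_reg/dog --num_class_images 200
# This downloads LAION images via clip-retrieval and writes caption/url/path lists alongside them.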
| 406
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128,
                 has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64,
                 max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True,
                 input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
    def atol_for_validation(self) -> float:
        return 1e-5
@property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework,))
        return inputs
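
def _example_layoutlmv3_onnx_inputs():
    """Illustrative only (not part of the library): the ONNX input ordering depends on the task."""
    config = LayoutLMv3Config()
    onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
    return list(onnx_config.inputs.keys())  # ["input_ids", "attention_mask", "bbox", "pixel_values"]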
| 108
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype,)
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype,)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,)
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,)
    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,)
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels,)
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,)
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype,)
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,)
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,)
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,)
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype,)
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,)
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
| 273
| 0
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 714
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_: Optional[int] = self.scheduler_classes[0]
lowerCamelCase_: Dict = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase_: Any = scheduler_class(**A_ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_: Any = torch.manual_seed(0 )
lowerCamelCase_: Dict = self.dummy_model()
lowerCamelCase_: Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_: Any = sample.to(A_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_: int = scheduler.scale_model_input(A_ , A_ )
lowerCamelCase_: Optional[Any] = model(A_ , A_ )
lowerCamelCase_: List[str] = scheduler.step(A_ , A_ , A_ , generator=A_ )
lowerCamelCase_: str = output.prev_sample
lowerCamelCase_: int = torch.sum(torch.abs(A_ ) )
lowerCamelCase_: List[Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
lowerCamelCase_: Any = self.scheduler_classes[0]
lowerCamelCase_: Optional[Any] = self.get_scheduler_config()
lowerCamelCase_: int = scheduler_class(**A_ )
scheduler.set_timesteps(self.num_inference_steps , device=A_ )
lowerCamelCase_: Dict = torch.manual_seed(0 )
lowerCamelCase_: Union[str, Any] = self.dummy_model()
lowerCamelCase_: str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCamelCase_: str = sample.to(A_ )
for t in scheduler.timesteps:
lowerCamelCase_: str = scheduler.scale_model_input(A_ , A_ )
lowerCamelCase_: str = model(A_ , A_ )
lowerCamelCase_: List[Any] = scheduler.step(A_ , A_ , A_ , generator=A_ )
lowerCamelCase_: int = output.prev_sample
lowerCamelCase_: List[Any] = torch.sum(torch.abs(A_ ) )
lowerCamelCase_: Optional[int] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
lowerCamelCase_: Any = self.scheduler_classes[0]
lowerCamelCase_: Dict = self.get_scheduler_config()
lowerCamelCase_: int = scheduler_class(**A_ , use_karras_sigmas=A_ )
scheduler.set_timesteps(self.num_inference_steps , device=A_ )
lowerCamelCase_: List[str] = torch.manual_seed(0 )
lowerCamelCase_: Union[str, Any] = self.dummy_model()
lowerCamelCase_: Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCamelCase_: List[str] = sample.to(A_ )
for t in scheduler.timesteps:
lowerCamelCase_: int = scheduler.scale_model_input(A_ , A_ )
lowerCamelCase_: int = model(A_ , A_ )
lowerCamelCase_: List[Any] = scheduler.step(A_ , A_ , A_ , generator=A_ )
lowerCamelCase_: List[Any] = output.prev_sample
lowerCamelCase_: Optional[int] = torch.sum(torch.abs(A_ ) )
lowerCamelCase_: int = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
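
# A minimal usage sketch (assumption: a Stable Diffusion checkpoint is
# available locally or on the Hub) showing how the scheduler under test is
# typically swapped into a pipeline:
#
#   from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
#   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)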
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # Secant update: x_{n+2} = x_{n+1} - f(x_{n+1}) / slope of the secant line
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
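
# A quick illustrative use (not in the original): the same routine finds
# sqrt(2) as the positive root of x^2 - 2:
#
#   print(intersection(lambda x: x * x - 2, 1.0, 2.0))  # ~1.41421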
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """O(n^2): for each element, scan the rest of the list for the next greater one."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same quadratic idea, written with enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n): traverse from the right, keeping a stack of greater candidates."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
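
# A small worked example (not in the original) of the stack-based version:
#
#   next_greatest_element([2, 7, 3, 5, 4])  # -> [7, -1, 5, -1, -1]
#
# 7 has no greater element to its right, 3's next greater element is 5, and
# the trailing 5 and 4 have none, hence the -1 sentinels.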
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
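
# Illustrative values (not part of the original): for edge = 2,
#
#   dodecahedron_surface_area(2)  # ~82.58
#   dodecahedron_volume(2)        # ~61.30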
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer


class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
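
# A minimal sketch (assumption: transformers >= 4.31, which introduced the
# `rope_scaling` config option exercised by test_model_rope_scaling) of
# enabling RoPE scaling outside of the test suite:
#
#   from transformers import LlamaConfig, LlamaModel
#   config = LlamaConfig(rope_scaling={"type": "dynamic", "factor": 2.0})
#   model = LlamaModel(config)  # extrapolates beyond the original max length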
"""Convert ProphetNet checkpoint."""

import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak ProphetNet's weights into our ProphetNet structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # The old model packs q/k/v into a single in_proj matrix; slice out the right third.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
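
# Example invocation (paths are placeholders, not from the original):
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path /path/to/old_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir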
"""Scrape mobile app development job listings from Indeed."""

from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak BART's weights into our BART structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
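
# Example invocation (output path is a placeholder, not from the original):
#
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#       bart.large.cnn /path/to/output_dir --hf_config facebook/bart-large-cnn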
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
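
# Illustrative wiring of the helpers above (paths are placeholders, not from
# the original):
#
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   vqgan = load_vqgan(device, conf_path="vqgan.yaml", ckpt_path="vqgan.ckpt")
#   xrec = reconstruct_with_vqgan(images.to(device), vqgan)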
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
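
# Sketch of how this mock is typically exercised in dataset tests (the
# dataset name, version, and URL are placeholders, not from the original):
#
#   dl_manager = MockDownloadManager("squad", config=None, version="1.0.0")
#   files = dl_manager.download_and_extract({"train": "https://example.com/train.json"})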
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on


class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
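
# Minimal translation sketch (assumes the public NLLB checkpoint can be
# downloaded; English to French shown):
#
#   from transformers import AutoModelForSeq2SeqLM
#   tok = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
#   batch = tok("Hello world", return_tensors="pt")
#   out = model.generate(**batch, forced_bos_token_id=tok.convert_tokens_to_ids("fra_Latn"))
#   print(tok.batch_decode(out, skip_special_tokens=True))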
def gnome_sort(lst: list) -> list:
    """Sort a list in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
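
# Quick example (not in the original); note the input list is mutated:
#
#   gnome_sort([3, 1, 2])  # -> [1, 2, 3]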
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0

    for _ in range(steps):
        # Approximates each small segment of the curve as linear and solves
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2

        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
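# Illustrative check (added): the integral of x^2 over [0, 3] is exactly 9, and
# the composite trapezoid rule converges to it with error roughly O(1/steps^2)
# for smooth integrands.
if __name__ == "__main__":
    approx = trapezoidal_area(lambda x: x * x, 0, 3, steps=1_000)
    assert abs(approx - 9.0) < 1e-3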
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    # Forward (explicit) Euler: y[k + 1] = y[k] + h * f(x[k], y[k])
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
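# Illustrative usage (added): solve dy/dx = y with y(0) = 1 on [0, 1]. Forward
# Euler has O(step_size) global error, so the endpoint should land near e.
if __name__ == "__main__":
    ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
    assert abs(ys[-1] - 2.718281828) < 0.01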
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
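# Illustrative usage of the config-driven factory above (added). `target` is a
# dotted import path and `params` are constructor kwargs; the class used here
# is just an arbitrary stdlib example.
if __name__ == "__main__":
    example_config = {"target": "collections.OrderedDict", "params": {}}
    obj = instantiate_from_config(example_config)
    print(type(obj))  # <class 'collections.OrderedDict'>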
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
def _lowercase ( self: Tuple ):
'''simple docstring'''
super().setUp()
_lowerCamelCase : Any = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_lowerCamelCase : List[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : Tuple = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : List[Any] = {"unk_token": "<unk>"}
_lowerCamelCase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCAmelCase ) )
def _lowercase ( self: Tuple ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: List[str] ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: Dict ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def _lowercase ( self: List[Any] ):
'''simple docstring'''
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def _lowercase ( self: List[str] ):
'''simple docstring'''
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_lowerCamelCase : Optional[Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : int = tokenizer(__lowerCAmelCase ,max_length=len(__lowerCAmelCase ) ,padding=__lowerCAmelCase ,return_tensors="pt" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_lowerCamelCase : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
@require_torch
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : List[str] = tokenizer(__lowerCAmelCase ,padding=__lowerCAmelCase ,return_tensors="pt" )
self.assertIn("input_ids" ,__lowerCAmelCase )
self.assertIn("attention_mask" ,__lowerCAmelCase )
self.assertNotIn("labels" ,__lowerCAmelCase )
self.assertNotIn("decoder_attention_mask" ,__lowerCAmelCase )
@require_torch
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Dict = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : List[str] = tokenizer(text_target=__lowerCAmelCase ,max_length=32 ,padding="max_length" ,return_tensors="pt" )
self.assertEqual(32 ,targets["input_ids"].shape[1] )
@require_torch
def _lowercase ( self: int ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : Union[str, Any] = tokenizer(
["I am a small frog" * 1_024, "I am a small frog"] ,padding=__lowerCAmelCase ,truncation=__lowerCAmelCase ,return_tensors="pt" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(batch.input_ids.shape ,(2, 5_122) )
@require_torch
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = ["A long paragraph for summarization."]
_lowerCamelCase : Union[str, Any] = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : List[Any] = tokenizer(__lowerCAmelCase ,return_tensors="pt" )
_lowerCamelCase : Tuple = tokenizer(text_target=__lowerCAmelCase ,return_tensors="pt" )
_lowerCamelCase : int = inputs["input_ids"]
_lowerCamelCase : int = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _lowercase ( self: Dict ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            sentences = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(sentences, padding=True)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
pass
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : Any = self.tokenizer_class.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "A, <mask> AllenNLP sentence."
_lowerCamelCase : int = tokenizer_r.encode_plus(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase ,return_token_type_ids=__lowerCAmelCase )
_lowerCamelCase : List[str] = tokenizer_p.encode_plus(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase ,return_token_type_ids=__lowerCAmelCase )
self.assertEqual(sum(tokens_r["token_type_ids"] ) ,sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) ,sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) ,)
_lowerCamelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
_lowerCamelCase : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
__lowerCAmelCase ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCAmelCase ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
"""simple docstring"""
import math
def fx(x: float, a: float) -> float:
    # f(x) = x**2 - a; its positive root is sqrt(a)
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    # start from the smallest power of two greater than a
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """Square root by the Newton-Raphson iteration x_{n+1} = x_n - f(x_n) / f'(x_n)."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
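# Illustrative check (added): the Newton iteration above should agree with
# math.sqrt to far better than the loose 1e-7 bound used here.
if __name__ == "__main__":
    for sample in (2.0, 10.0, 12345.0):
        assert abs(square_root_iterative(sample) - math.sqrt(sample)) < 1e-7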
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a real root of func(x) = 0, with func given as a string in x, starting from the guess a."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find root of log(x) - 1 = 0 (i.e. x = e)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester( unittest.TestCase ):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Any:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_resize_and_center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "crop_pct" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_normalize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_mean" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_std" ) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 3_0} )
self.assertEqual(image_processor.crop_size , {"height": 3_0, "width": 3_0} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE__ ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    _keys_to_ignore_on_load_unexpected = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : int = 5_0257 , lowerCAmelCase : int = 1024 , lowerCAmelCase : int = 768 , lowerCAmelCase : int = 12 , lowerCAmelCase : int = 12 , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : str = "gelu_new" , lowerCAmelCase : float = 0.1 , lowerCAmelCase : float = 0.1 , lowerCAmelCase : float = 0.1 , lowerCAmelCase : float = 1e-5 , lowerCAmelCase : float = 0.02 , lowerCAmelCase : bool = True , lowerCAmelCase : bool = True , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , ):
super().__init__()
lowerCAmelCase = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal." )
lowerCAmelCase = prefix_inner_dim
lowerCAmelCase = prefix_hidden_dim
lowerCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCAmelCase = (
nn.Linear(self.prefix_hidden_dim , UpperCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCAmelCase = GPTaConfig(
vocab_size=UpperCamelCase_ , n_positions=UpperCamelCase_ , n_embd=UpperCamelCase_ , n_layer=UpperCamelCase_ , n_head=UpperCamelCase_ , n_inner=UpperCamelCase_ , activation_function=UpperCamelCase_ , resid_pdrop=UpperCamelCase_ , embd_pdrop=UpperCamelCase_ , attn_pdrop=UpperCamelCase_ , layer_norm_epsilon=UpperCamelCase_ , initializer_range=UpperCamelCase_ , scale_attn_weights=UpperCamelCase_ , use_cache=UpperCamelCase_ , scale_attn_by_inverse_layer_idx=UpperCamelCase_ , reorder_and_upcast_attn=UpperCamelCase_ , )
lowerCAmelCase = GPTaLMHeadModel(UpperCamelCase_ )
def __lowercase ( self : str , lowerCAmelCase : torch.Tensor , lowerCAmelCase : torch.Tensor , lowerCAmelCase : Optional[torch.Tensor] = None , lowerCAmelCase : Optional[torch.Tensor] = None , ):
lowerCAmelCase = self.transformer.transformer.wte(UpperCamelCase_ )
lowerCAmelCase = self.encode_prefix(UpperCamelCase_ )
lowerCAmelCase = self.decode_prefix(UpperCamelCase_ )
lowerCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowerCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowerCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
lowerCAmelCase = self.transformer(inputs_embeds=UpperCamelCase_ , labels=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __lowercase ( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : torch.device ):
        return torch.zeros(UpperCamelCase_ , self.prefix_length , dtype=torch.int64 , device=UpperCamelCase_ )
def __lowercase ( self : int , lowerCAmelCase : Optional[Any] ):
return self.encode_prefix(UpperCamelCase_ )
@torch.no_grad()
def __lowercase ( self : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ):
lowerCAmelCase = torch.split(UpperCamelCase_ , 1 , dim=0 )
lowerCAmelCase = []
lowerCAmelCase = []
for feature in features:
lowerCAmelCase = self.decode_prefix(feature.to(UpperCamelCase_ ) ) # back to the clip feature
# Only support beam search for now
lowerCAmelCase = self.generate_beam(
input_embeds=UpperCamelCase_ , device=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCAmelCase = torch.stack(UpperCamelCase_ )
lowerCAmelCase = torch.stack(UpperCamelCase_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __lowercase ( self : Any , lowerCAmelCase : int=None , lowerCAmelCase : int=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : int = 5 , lowerCAmelCase : int = 67 , lowerCAmelCase : float = 1.0 , lowerCAmelCase : Optional[int] = None , ):
lowerCAmelCase = eos_token_id
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ , dtype=torch.int )
lowerCAmelCase = torch.zeros(UpperCamelCase_ , device=UpperCamelCase_ , dtype=torch.bool )
if input_embeds is not None:
lowerCAmelCase = input_embeds
else:
lowerCAmelCase = self.transformer.transformer.wte(UpperCamelCase_ )
for i in range(UpperCamelCase_ ):
lowerCAmelCase = self.transformer(inputs_embeds=UpperCamelCase_ )
lowerCAmelCase = outputs.logits
lowerCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCAmelCase = logits.softmax(-1 ).log()
if scores is None:
lowerCAmelCase = logits.topk(UpperCamelCase_ , -1 )
lowerCAmelCase = generated.expand(UpperCamelCase_ , *generated.shape[1:] )
lowerCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCAmelCase = next_tokens
else:
lowerCAmelCase = tokens.expand(UpperCamelCase_ , *tokens.shape[1:] )
lowerCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCAmelCase = -float(np.inf )
lowerCAmelCase = 0
lowerCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCAmelCase = scores_sum / seq_lengths[:, None]
lowerCAmelCase = scores_sum_average.view(-1 ).topk(UpperCamelCase_ , -1 )
lowerCAmelCase = next_tokens // scores_sum.shape[1]
lowerCAmelCase = seq_lengths[next_tokens_source]
lowerCAmelCase = next_tokens % scores_sum.shape[1]
lowerCAmelCase = next_tokens.unsqueeze(1 )
lowerCAmelCase = tokens[next_tokens_source]
lowerCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
lowerCAmelCase = generated[next_tokens_source]
lowerCAmelCase = scores_sum_average * seq_lengths
lowerCAmelCase = is_stopped[next_tokens_source]
lowerCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
lowerCAmelCase = is_stopped + next_tokens.eq(UpperCamelCase_ ).squeeze()
if is_stopped.all():
break
lowerCAmelCase = scores / seq_lengths
lowerCAmelCase = scores.argsort(descending=UpperCamelCase_ )
# tokens tensors are already padded to max_seq_length
lowerCAmelCase = [tokens[i] for i in order]
lowerCAmelCase = torch.stack(UpperCamelCase_ , dim=0 )
lowerCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_time_series_transformer': [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TimeSeriesTransformerConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_time_series_transformer'] = [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimeSeriesTransformerForPrediction',
        'TimeSeriesTransformerModel',
        'TimeSeriesTransformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
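# Design note (added): this is the standard transformers lazy-module pattern.
# The torch-heavy modeling file is imported only when one of its attributes is
# first accessed, e.g.:
#
#   import transformers.models.time_series_transformer as tst
#   config_cls = tst.TimeSeriesTransformerConfig  # real import happens here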
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """Download the BLIP demo image and preprocess it to a (1, 3, H, W) tensor."""
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
"""simple docstring"""
if config_path is not None:
UpperCAmelCase = BlipConfig.from_pretrained(_snake_case )
else:
UpperCAmelCase = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
UpperCAmelCase = BlipForConditionalGeneration(_snake_case ).eval()
UpperCAmelCase = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"""
UpperCAmelCase = blip_decoder(pretrained=_snake_case , image_size=384 , vit="""base""" )
UpperCAmelCase = pt_model.eval()
UpperCAmelCase = pt_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase = modified_state_dict.pop(_snake_case )
UpperCAmelCase = rename_key(_snake_case )
UpperCAmelCase = value
hf_model.load_state_dict(_snake_case )
UpperCAmelCase = 384
UpperCAmelCase = load_demo_image(image_size=_snake_case , device="""cpu""" )
UpperCAmelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
UpperCAmelCase = tokenizer(["""a picture of"""] ).input_ids
UpperCAmelCase = hf_model.generate(_snake_case , _snake_case )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
UpperCAmelCase = hf_model.generate(_snake_case )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(_snake_case )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
UpperCAmelCase = (
"""https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"""
)
UpperCAmelCase = blip_vqa(pretrained=_snake_case , image_size=_snake_case , vit="""base""" )
vqa_model.eval()
UpperCAmelCase = vqa_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase = modified_state_dict.pop(_snake_case )
UpperCAmelCase = rename_key(_snake_case )
UpperCAmelCase = value
UpperCAmelCase = BlipForQuestionAnswering(_snake_case )
hf_vqa_model.load_state_dict(_snake_case )
UpperCAmelCase = ["""How many dogs are in this image?"""]
UpperCAmelCase = tokenizer(_snake_case , return_tensors="""pt""" ).input_ids
UpperCAmelCase = hf_vqa_model.generate(_snake_case , _snake_case )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + """_vqa""" )
UpperCAmelCase = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"""
UpperCAmelCase = blip_itm(pretrained=_snake_case , image_size=_snake_case , vit="""base""" )
itm_model.eval()
UpperCAmelCase = itm_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase = modified_state_dict.pop(_snake_case )
UpperCAmelCase = rename_key(_snake_case )
UpperCAmelCase = value
UpperCAmelCase = BlipForImageTextRetrieval(_snake_case )
UpperCAmelCase = ["""A picture of a woman with a dog sitting in a beach"""]
UpperCAmelCase = tokenizer(
_snake_case , return_tensors="""pt""" , padding="""max_length""" , truncation=_snake_case , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(_snake_case )
hf_itm_model.eval()
UpperCAmelCase = hf_itm_model(_snake_case , _snake_case , use_itm_head=_snake_case )
UpperCAmelCase = hf_itm_model(_snake_case , _snake_case , use_itm_head=_snake_case )
assert out[0].item() == 0.2110687494277954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + """_itm""" )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
_UpperCamelCase = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
def __init__( self ,A ,A=13 ,A=7 ,A=True ,A=True ,A=True ,A=99 ,A=32 ,A=5 ,A=4 ,A=37 ,A="gelu" ,A=0.1 ,A=0.1 ,A=512 ,A=16 ,A=2 ,A=0.02 ,A=3 ,A=4 ,A=None ,):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = self.vocab_size - 1
def _UpperCamelCase ( self ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _UpperCamelCase ( self ,A ,A ,A ,A ,*A ):
UpperCAmelCase = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase = model(A ,token_type_ids=A ,head_mask=A )
UpperCAmelCase = model(A ,token_type_ids=A )
UpperCAmelCase = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,*A ):
UpperCAmelCase = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
UpperCAmelCase = model(A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,*A ):
UpperCAmelCase = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
UpperCAmelCase = model(A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,*A ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = model(A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _UpperCamelCase ( self ,A ,A ,A=False ):
UpperCAmelCase = super()._prepare_for_class(A ,A ,return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=A ,)
UpperCAmelCase = inputs_dict["""labels"""]
UpperCAmelCase = inputs_dict["""labels"""]
UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=A ,)
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
return inputs_dict
def _UpperCamelCase ( self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=OpenAIGPTConfig ,n_embd=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def _UpperCamelCase ( self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class OPENAIGPTModelLanguageGenerationTest( unittest.TestCase ):
@slow
def _UpperCamelCase ( self ):
UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(A )
UpperCAmelCase = torch.tensor([[481, 4_735, 544]] ,dtype=torch.long ,device=A ) # the president is
UpperCAmelCase = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
UpperCAmelCase = model.generate(A ,do_sample=A )
self.assertListEqual(output_ids[0].tolist() ,A )
import math


def check_partition_perfect(positive_integer: int) -> bool:
    # A candidate is "perfect" when sqrt(4*n + 1)/2 + 1/2 is an exact power of two.
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)

    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0 and perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f'{solution() = }')
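# Illustrative check (added): check_partition_perfect flags candidates where
# sqrt(4*n + 1)/2 + 1/2 is an exact power of two, e.g. n = 2 (giving 2 = 2^1)
# and n = 12 (giving 4 = 2^2), while n = 6 gives 3 and is rejected.
if __name__ == "__main__":
    assert check_partition_perfect(2)
    assert check_partition_perfect(12)
    assert not check_partition_perfect(6)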
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: take items in decreasing profit/weight order."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as used so duplicate ratios resolve correctly

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
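# Illustrative check (added): with profits [1, 2, 3], weights [3, 4, 5] and a
# 15 kg limit, everything fits (total weight 12), so the gain is 1 + 2 + 3 = 6.
if __name__ == "__main__":
    assert calc_profit([1, 2, 3], [3, 4, 5], 15) == 6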
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word mask in Chinese."}
    )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."}
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
"""simple docstring"""
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
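

# A direct enumeration for cross-checking the closed-form count on small limits.
# This helper is not part of the original solution; it is an illustrative sketch
# (feasible only for modest `limit` values).
def _brute_force_solution(limit: int) -> int:
    count = 0
    outer_width = 3
    while 4 * (outer_width - 1) <= limit:  # thinnest possible frame for this width
        hole_width = outer_width - 2
        while hole_width >= 1 and outer_width**2 - hole_width**2 <= limit:
            count += 1
            hole_width -= 2
        outer_width += 1
    return count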
if __name__ == "__main__":
print(f'{solution() = }')
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
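# `get_tfds` below reads CSV files into `datasets`, pops the label column, tokenizes the
# remaining text column(s) (single sentences or sentence pairs), and exposes the encoded
# examples as `tf.data.Dataset`s built from Python generators.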
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
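# `find_executable_batch_size` re-runs the decorated function, halving `batch_size`
# after every CUDA out-of-memory error, until the call succeeds or the batch size
# reaches zero. The tests below fake OOM errors to exercise that back-off behaviour.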
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
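# `apply_tesseract` runs OCR over the document image and returns the recognized words
# together with their normalized bounding boxes, which LayoutLM-style models consume
# alongside the question text.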
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
UpperCamelCase_ = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : str =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
lowercase : List[str] =INVOICE_URL
lowercase : Tuple ='''How many cats are there?'''
lowercase : str =[
{'''score''': 0.00_01, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.00_01, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__ , decimals=4 ) , UpperCAmelCase__ )
lowercase : Union[str, Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__ , decimals=4 ) , UpperCAmelCase__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Any =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(UpperCAmelCase__ , [] )
# We can optionnally pass directly the words and bounding boxes
lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Dict =[]
lowercase : List[Any] =[]
lowercase : Dict =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , words=UpperCAmelCase__ , boxes=UpperCAmelCase__ , top_k=2 )
self.assertEqual(UpperCAmelCase__ , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[Any] =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
lowercase : Union[str, Any] =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Dict =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Any =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Any =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
lowercase : Optional[int] =INVOICE_URL
lowercase : Dict ='''What is the invoice number?'''
lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.99_48, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Optional[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.99_48, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Optional[Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.99_48, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Optional[int] =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase__ )
lowercase : Optional[Any] =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase__ , revision='''3dc6de3''' , )
lowercase : Dict =INVOICE_URL
lowercase : Union[str, Any] ='''What is the invoice number?'''
lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : Dict =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
lowercase : Dict =list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ) , UpperCAmelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : int =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[str] =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase__ )
lowercase : Any =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase__ , revision='''3dc6de3''' , max_seq_len=50 , )
lowercase : Union[str, Any] =INVOICE_URL
lowercase : List[str] ='''What is the invoice number?'''
lowercase : Dict =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Tuple =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
lowercase : List[Any] =list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ) , UpperCAmelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : str =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
lowercase : Tuple =INVOICE_URL
lowercase : str ='''What is the invoice number?'''
lowercase : str =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
from math import factorial, radians
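# Approximates sin(x) with its Maclaurin series,
#   sin(x) = x - x**3/3! + x**5/5! - x**7/7! + ...,
# after reducing the angle modulo 360 degrees so the series converges quickly.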
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
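

# Example usage (results rounded to `rounded_values_count` decimal places):
#   sin(90.0)  -> 1.0
#   sin(270.0) -> -1.0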
if __name__ == "__main__":
__import__("doctest").testmod()
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
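# DEIS is a multistep solver: each `step` call reuses model outputs from earlier
# timesteps, so these tests pre-populate the scheduler with dummy "past residuals"
# before stepping.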
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
_lowerCAmelCase : Dict = dict(self.forward_default_kwargs )
_lowerCAmelCase : Optional[Any] = kwargs.pop("num_inference_steps" , _a )
_lowerCAmelCase : Optional[int] = self.dummy_sample
_lowerCAmelCase : str = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_a )
_lowerCAmelCase : Dict = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals
_lowerCAmelCase : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
_lowerCAmelCase : Tuple = scheduler_class.from_pretrained(_a )
new_scheduler.set_timesteps(_a )
# copy over dummy past residuals
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Any = sample, sample
for t in range(_a , time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : str = scheduler.step(_a , _a , _a , **_a ).prev_sample
_lowerCAmelCase : Any = new_scheduler.step(_a , _a , _a , **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self ):
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop("num_inference_steps" , _a )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Any = 0.1 * sample
_lowerCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCAmelCase : int = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
_lowerCAmelCase : Tuple = scheduler_class.from_pretrained(_a )
# copy over dummy past residuals
new_scheduler.set_timesteps(_a )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : List[Any] = scheduler.step(_a , _a , _a , **_a ).prev_sample
_lowerCAmelCase : str = new_scheduler.step(_a , _a , _a , **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
if scheduler is None:
_lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase : Any = self.get_scheduler_config(**_a )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_a )
_lowerCAmelCase : List[str] = self.scheduler_classes[0]
_lowerCAmelCase : List[Any] = self.get_scheduler_config(**_a )
_lowerCAmelCase : str = scheduler_class(**_a )
_lowerCAmelCase : str = 10
_lowerCAmelCase : Optional[int] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : str = model(_a , _a )
_lowerCAmelCase : Tuple = scheduler.step(_a , _a , _a ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Any = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop("num_inference_steps" , _a )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : List[str] = self.get_scheduler_config()
_lowerCAmelCase : str = scheduler_class(**_a )
_lowerCAmelCase : Optional[int] = self.dummy_sample
_lowerCAmelCase : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(_a , "set_timesteps" ):
scheduler.set_timesteps(_a )
elif num_inference_steps is not None and not hasattr(_a , "set_timesteps" ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
_lowerCAmelCase : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.timesteps[5]
_lowerCAmelCase : Any = scheduler.timesteps[6]
_lowerCAmelCase : Optional[Any] = scheduler.step(_a , _a , _a , **_a ).prev_sample
_lowerCAmelCase : str = scheduler.step(_a , _a , _a , **_a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : str = DEISMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Tuple = self.full_loop(scheduler=_a )
_lowerCAmelCase : List[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1E-3
_lowerCAmelCase : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : str = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : str = self.full_loop(scheduler=_a )
_lowerCAmelCase : List[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self ):
self.check_over_configs(thresholding=_a )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , algorithm_type="deis" , solver_order=_a , solver_type=_a , )
def SCREAMING_SNAKE_CASE__ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def SCREAMING_SNAKE_CASE__ ( self ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_a , solver_type=_a , prediction_type=_a , algorithm_type=_a , )
_lowerCAmelCase : Optional[int] = self.full_loop(
solver_order=_a , solver_type=_a , prediction_type=_a , algorithm_type=_a , )
assert not torch.isnan(_a ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE__ ( self ):
self.check_over_configs(lower_order_final=_a )
self.check_over_configs(lower_order_final=_a )
def SCREAMING_SNAKE_CASE__ ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_a , time_step=0 )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Any = self.full_loop()
_lowerCAmelCase : Dict = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Dict = self.full_loop(prediction_type="v_prediction" )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[str] = self.scheduler_classes[0]
_lowerCAmelCase : List[Any] = self.get_scheduler_config(thresholding=_a , dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Optional[Any] = scheduler_class(**_a )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter.half()
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : List[Any] = model(_a , _a )
_lowerCAmelCase : int = scheduler.step(_a , _a , _a ).prev_sample
        assert sample.dtype == torch.float16
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        # Note: the boolean below was obfuscated in the source; True is assumed.
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.75,
}
return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def SCREAMING_SNAKE_CASE__ ( self ):
pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
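# Standard lazy-import layout: `_import_structure` maps submodules to their public
# names, and the module object is swapped for a `_LazyModule` at the bottom so heavy
# imports only happen on first attribute access; `TYPE_CHECKING` gets real imports so
# static type checkers can still resolve the names.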
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
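# Helpers for loading a taming-transformers VQGAN from an OmegaConf config plus a
# checkpoint, and for round-tripping images through its encoder/decoder.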
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"
def __init__( self : str ,A : int ):
'''simple docstring'''
if type(A ) == dict:
UpperCAmelCase__ : Tuple = Namespace(**A )
UpperCAmelCase__ : List[str] = glue_output_modes[hparams.task]
UpperCAmelCase__ : Optional[int] = glue_tasks_num_labels[hparams.task]
super().__init__(A ,A ,self.mode )
def __lowercase ( self : int ,**A : Dict ):
'''simple docstring'''
return self.model(**A )
def __lowercase ( self : Dict ,A : Any ,A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCAmelCase__ : Optional[int] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
UpperCAmelCase__ : Union[str, Any] = self(**A )
UpperCAmelCase__ : Dict = outputs[0]
UpperCAmelCase__ : Union[str, Any] = self.trainer.lr_schedulers[0]["""scheduler"""]
UpperCAmelCase__ : Optional[Any] = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.hparams
UpperCAmelCase__ : str = processors[args.task]()
UpperCAmelCase__ : List[Any] = processor.get_labels()
for mode in ["train", "dev"]:
UpperCAmelCase__ : Union[str, Any] = self._feature_file(A )
if os.path.exists(A ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" ,A )
else:
logger.info("""Creating features from dataset file at %s""" ,args.data_dir )
UpperCAmelCase__ : Optional[int] = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
UpperCAmelCase__ : Tuple = convert_examples_to_features(
A ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,)
logger.info("""Saving features into cached file %s""" ,A )
torch.save(A ,A )
def __lowercase ( self : List[Any] ,A : str ,A : int ,A : bool = False ):
'''simple docstring'''
UpperCAmelCase__ : str = """dev""" if mode == """test""" else mode
UpperCAmelCase__ : Dict = self._feature_file(A )
logger.info("""Loading features from cached file %s""" ,A )
UpperCAmelCase__ : Any = torch.load(A )
UpperCAmelCase__ : Dict = torch.tensor([f.input_ids for f in features] ,dtype=torch.long )
UpperCAmelCase__ : Dict = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long )
UpperCAmelCase__ : Union[str, Any] = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
UpperCAmelCase__ : str = torch.tensor([f.label for f in features] ,dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
UpperCAmelCase__ : Optional[int] = torch.tensor([f.label for f in features] ,dtype=torch.float )
return DataLoader(
TensorDataset(A ,A ,A ,A ) ,batch_size=A ,shuffle=A ,)
def __lowercase ( self : List[str] ,A : Union[str, Any] ,A : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCAmelCase__ : List[Any] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
UpperCAmelCase__ : Dict = self(**A )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = outputs[:2]
UpperCAmelCase__ : Optional[Any] = logits.detach().cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __lowercase ( self : Dict ,A : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
UpperCAmelCase__ : Optional[int] = np.concatenate([x["""pred"""] for x in outputs] ,axis=0 )
if self.hparams.glue_output_mode == "classification":
UpperCAmelCase__ : str = np.argmax(A ,axis=1 )
elif self.hparams.glue_output_mode == "regression":
UpperCAmelCase__ : Dict = np.squeeze(A )
UpperCAmelCase__ : List[str] = np.concatenate([x["""target"""] for x in outputs] ,axis=0 )
UpperCAmelCase__ : Optional[Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase__ : int = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase__ : List[Any] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task ,A ,A )}
UpperCAmelCase__ : Any = dict(results.items() )
UpperCAmelCase__ : Optional[int] = results
return ret, preds_list, out_label_list
def __lowercase ( self : int ,A : list ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = self._eval_end(A )
UpperCAmelCase__ : str = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __lowercase ( self : Any ,A : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self._eval_end(A )
UpperCAmelCase__ : Union[str, Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __lowercase ( A : List[str] ,A : Union[str, Any] ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(A ,A )
parser.add_argument(
"""--max_seq_length""" ,default=128 ,type=A ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--task""" ,default="""""" ,type=A ,required=A ,help="""The GLUE task to run""" ,)
parser.add_argument(
"""--gpus""" ,default=0 ,type=A ,help="""The number of GPUs allocated for this, it is by default 0 meaning none""" ,)
parser.add_argument(
"""--overwrite_cache""" ,action="""store_true""" ,help="""Overwrite the cached training and evaluation sets""" )
return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : Tuple = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : int = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import os
import re
_lowerCAmelCase = "src/diffusers"
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def UpperCamelCase ( a ) -> int:
'''simple docstring'''
__magic_name__ = _re_indent.search(a )
return "" if search is None else search.groups()[0]
def UpperCamelCase ( a , a="" , a=None , a=None ) -> Any:
'''simple docstring'''
__magic_name__ = 0
__magic_name__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(a ):
index += 1
__magic_name__ = ['''\n'''.join(lines[:index] )]
else:
__magic_name__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__magic_name__ = [lines[index]]
index += 1
while index < len(a ) and (end_prompt is None or not lines[index].startswith(a )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(a ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(a ) )
if index < len(a ) - 1:
__magic_name__ = [lines[index + 1]]
index += 1
else:
__magic_name__ = []
else:
blocks.append('''\n'''.join(a ) )
__magic_name__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(a ) > 0:
blocks.append('''\n'''.join(a ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(a ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
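# ignore_underscore wraps a key function so names compare case-insensitively and with underscores stripped.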
def UpperCamelCase ( a ) -> Tuple:
'''simple docstring'''
def _inner(a ):
return key(a ).lower().replace('''_''' , '''''' )
return _inner
def UpperCamelCase ( a , a=None ) -> Union[str, Any]:
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(x ):
return x
if key is None:
__magic_name__ = noop
# Constants are all uppercase, they go first.
__magic_name__ = [obj for obj in objects if key(a ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__magic_name__ = [obj for obj in objects if key(a )[0].isupper() and not key(a ).isupper()]
# Functions begin with a lowercase, they go last.
__magic_name__ = [obj for obj in objects if not key(a )[0].isupper()]
__magic_name__ = ignore_underscore(a )
return sorted(a , key=a ) + sorted(a , key=a ) + sorted(a , key=a )
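# Intended ordering (isort-style): UPPER_CASE constants first, then CamelCase classes, then lowercase functions,
# e.g. sort_objects(["zfn", "AClass", "A_CONST"]) -> ["A_CONST", "AClass", "zfn"].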
def UpperCamelCase ( a ) -> List[Any]:
'''simple docstring'''
# This inner function sort imports between [ ].
def _replace(a ):
__magic_name__ = match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
__magic_name__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__magic_name__ = keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(a )] ) + "]"
__magic_name__ = import_statement.split('''\n''' )
if len(a ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__magic_name__ = 2 if lines[1].strip() == '''[''' else 1
__magic_name__ = [(i, _re_strip_line.search(a ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__magic_name__ = sort_objects(a , key=lambda x : x[1] )
__magic_name__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(a ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__magic_name__ = _re_bracket_content.sub(_replace , lines[1] )
else:
__magic_name__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__magic_name__ = keys[:-1]
__magic_name__ = get_indent(lines[1] ) + ''', '''.join([F'''"{k}"''' for k in sort_objects(a )] )
return "\n".join(a )
else:
# Finally we have to deal with imports fitting on one line
__magic_name__ = _re_bracket_content.sub(_replace , a )
return import_statement
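# Intended behaviour on a hypothetical one-line import: sort_objects_in_import('_import_structure["models"] = ["ZModel", "AConfig"]')
# -> '_import_structure["models"] = ["AConfig", "ZModel"]'.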
def UpperCamelCase ( a , a=True ) -> str:
'''simple docstring'''
with open(a , '''r''' ) as f:
__magic_name__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__magic_name__ = split_code_in_indented_blocks(
a , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(a ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__magic_name__ = main_blocks[block_idx]
__magic_name__ = block.split('''\n''' )
# Get to the start of the imports.
__magic_name__ = 0
while line_idx < len(a ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__magic_name__ = len(a )
else:
line_idx += 1
if line_idx >= len(a ):
continue
# Ignore beginning and last line: they don't contain anything.
__magic_name__ = '''\n'''.join(block_lines[line_idx:-1] )
__magic_name__ = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
__magic_name__ = split_code_in_indented_blocks(a , indent_level=a )
# We have two categories of import key: list or _import_structure[key].append/extend
__magic_name__ = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__magic_name__ = [(pattern.search(a ).groups()[0] if pattern.search(a ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__magic_name__ = [(i, key) for i, key in enumerate(a ) if key is not None]
__magic_name__ = [x[0] for x in sorted(a , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__magic_name__ = 0
__magic_name__ = []
for i in range(len(a ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__magic_name__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(a )
count += 1
# And we put our main block back together with its first and last line.
__magic_name__ = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(a ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(a , '''w''' ) as f:
f.write('''\n'''.join(a ) )
def UpperCamelCase ( a=True ) -> Dict:
'''simple docstring'''
__magic_name__ = []
for root, _, files in os.walk(a ):
if "__init__.py" in files:
__magic_name__ = sort_imports(os.path.join(a , '''__init__.py''' ) , check_only=a )
if result:
__magic_name__ = [os.path.join(a , '''__init__.py''' )]
if len(a ) > 0:
raise ValueError(F'''Would overwrite {len(a )} files, run `make style`.''' )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
_lowerCAmelCase = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self : Any , a__ : Union[str, Any] , a__ : Optional[Any]=3 , a__ : Optional[Any]=7 , a__ : List[str]=True , a__ : List[Any]=True , a__ : List[str]=False , a__ : List[Any]=True , a__ : Tuple=99 , a__ : Optional[int]=32 , a__ : List[Any]=5 , a__ : Tuple=4 , a__ : Tuple=37 , a__ : int="gelu" , a__ : Optional[int]=0.1 , a__ : Union[str, Any]=0.1 , a__ : Optional[Any]=512 , a__ : int=16 , a__ : Union[str, Any]=2 , a__ : Optional[Any]=0.02 , a__ : Any=3 , a__ : Dict=4 , a__ : Optional[Any]=None , ):
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_input_mask
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
def snake_case__ ( self : Dict ):
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_input_mask:
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Tuple ):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=a__ , )
def snake_case__ ( self : Any , a__ : Optional[int] , a__ : Any , a__ : Tuple , a__ : Optional[Any] , a__ : str , a__ : Optional[Any] , a__ : Any ):
__magic_name__ = FalconModel(config=a__ )
model.to(a__ )
model.eval()
__magic_name__ = model(a__ , attention_mask=a__ )
__magic_name__ = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Union[str, Any] , a__ : Union[str, Any] , a__ : Tuple , a__ : int , a__ : List[Any] , a__ : Union[str, Any] , a__ : List[str] , a__ : Optional[Any] , a__ : Tuple , a__ : Optional[Any] , ):
__magic_name__ = True
__magic_name__ = FalconModel(a__ )
model.to(a__ )
model.eval()
__magic_name__ = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
__magic_name__ = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , )
__magic_name__ = model(a__ , attention_mask=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : List[Any] , a__ : Any , a__ : str , a__ : Any , a__ : Union[str, Any] , a__ : List[Any] , a__ : List[Any] , a__ : Dict , a__ : int , a__ : Dict , ):
__magic_name__ = FalconForCausalLM(config=a__ )
model.to(a__ )
model.eval()
__magic_name__ = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : int , a__ : Any , a__ : int , a__ : Tuple , a__ : List[Any] , a__ : Any , a__ : Optional[int] , a__ : List[str] , a__ : Any , a__ : Any , ):
__magic_name__ = True
__magic_name__ = True
__magic_name__ = FalconForCausalLM(config=a__ )
model.to(a__ )
model.eval()
# first forward pass
__magic_name__ = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , use_cache=a__ , )
__magic_name__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__magic_name__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
__magic_name__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__magic_name__ = torch.cat([input_ids, next_tokens] , dim=-1 )
__magic_name__ = torch.cat([input_mask, next_mask] , dim=-1 )
__magic_name__ = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , output_hidden_states=a__ , )['''hidden_states'''][0]
__magic_name__ = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , past_key_values=a__ , output_hidden_states=a__ , )['''hidden_states'''][0]
# select random slice
__magic_name__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__magic_name__ = output_from_no_past[:, -3:, random_slice_idx].detach()
__magic_name__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a__ , a__ , atol=1E-3 ) )
def snake_case__ ( self : Optional[int] ):
__magic_name__ = self.prepare_config_and_inputs()
( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) = config_and_inputs
__magic_name__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
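# Common-test harness for Falcon: covers the base model, the causal LM (with and without KV-cache / cross-attention), and the classification heads.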
@require_torch
class _SCREAMING_SNAKE_CASE ( __a ,__a ,__a ,unittest.TestCase ):
__SCREAMING_SNAKE_CASE :Union[str, Any] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE :str = (FalconForCausalLM,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE :List[str] = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE :str = False
__SCREAMING_SNAKE_CASE :Union[str, Any] = False
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ = FalconModelTester(self )
__magic_name__ = ConfigTester(self , config_class=a__ , hidden_size=37 )
def snake_case__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def snake_case__ ( self : Optional[int] ):
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def snake_case__ ( self : int ):
__magic_name__ , *__magic_name__ = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
__magic_name__ = alibi
self.model_tester.create_and_check_model(a__ , *a__ )
def snake_case__ ( self : List[Any] ):
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = 3
__magic_name__ = input_dict['''input_ids''']
__magic_name__ = input_ids.ne(1 ).to(a__ )
__magic_name__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__magic_name__ = FalconForSequenceClassification(a__ )
model.to(a__ )
model.eval()
__magic_name__ = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__ ( self : Optional[int] ):
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = 3
__magic_name__ = '''single_label_classification'''
__magic_name__ = input_dict['''input_ids''']
__magic_name__ = input_ids.ne(1 ).to(a__ )
__magic_name__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__magic_name__ = FalconForSequenceClassification(a__ )
model.to(a__ )
model.eval()
__magic_name__ = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__ ( self : Tuple ):
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = input_dict['''input_ids''']
__magic_name__ = FalconForCausalLM(a__ )
model.to(a__ )
model.eval()
__magic_name__ = model(a__ , use_cache=a__ )
__magic_name__ = input_ids.shape[0]
__magic_name__ = model._convert_to_rw_cache(result.past_key_values )
__magic_name__ = model._convert_cache_to_standard_format(a__ , a__ )
for layer in range(len(a__ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def snake_case__ ( self : Dict ):
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = 3
__magic_name__ = '''multi_label_classification'''
__magic_name__ = input_dict['''input_ids''']
__magic_name__ = input_ids.ne(1 ).to(a__ )
__magic_name__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__magic_name__ = FalconForSequenceClassification(a__ )
model.to(a__ )
model.eval()
__magic_name__ = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__ ( self : str ):
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(a__ , '''use_cache''' ):
return
__magic_name__ = model_class(a__ ).to(a__ )
if "use_cache" not in inputs:
__magic_name__ = True
__magic_name__ = model(**a__ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__magic_name__ = (
getattr(a__ , '''decoder_layers''' , a__ )
or getattr(a__ , '''num_decoder_layers''' , a__ )
or config.num_hidden_layers
)
__magic_name__ = getattr(a__ , '''num_kv_heads''' , config.num_attention_heads )
__magic_name__ = getattr(a__ , '''d_model''' , config.hidden_size )
__magic_name__ = embed_dim // num_attention_heads
__magic_name__ = outputs['''past_key_values''']
self.assertEqual(len(a__ ) , a__ )
__magic_name__ , __magic_name__ = inputs['''input_ids'''].shape
for i in range(a__ ):
if config.new_decoder_architecture:
__magic_name__ = config.num_attention_heads
elif config.multi_query:
__magic_name__ = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def snake_case__ ( self : List[Any] ):
__magic_name__ = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
__magic_name__ = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(a__ )
__magic_name__ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(a__ )
__magic_name__ = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
__magic_name__ = model.generate(**a__ , do_sample=a__ , max_new_tokens=19 )
__magic_name__ = tokenizer.batch_decode(a__ )[0]
self.assertEqual(a__ , a__ )
@slow
def snake_case__ ( self : Optional[int] ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
__magic_name__ = AutoTokenizer.from_pretrained(a__ )
__magic_name__ = FalconForCausalLM.from_pretrained(a__ )
model.eval()
model.to(a__ )
__magic_name__ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(a__ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**a__ , do_sample=a__ , max_new_tokens=4 )
model.generate(**a__ , do_sample=a__ , max_new_tokens=4 )
model.generate(**a__ , num_beams=2 , max_new_tokens=4 )
@slow
def snake_case__ ( self : Optional[Any] ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
__magic_name__ = AutoTokenizer.from_pretrained(a__ )
__magic_name__ = FalconForCausalLM.from_pretrained(a__ )
model.eval()
model.to(device=a__ )
__magic_name__ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(a__ )
# Test results are the same with and without cache
__magic_name__ = model.generate(**a__ , do_sample=a__ , max_new_tokens=20 , use_cache=a__ )
__magic_name__ = model.generate(**a__ , do_sample=a__ , max_new_tokens=20 , use_cache=a__ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
from collections import namedtuple
import requests
from lxml import html # type: ignore
__A = namedtuple("covid_data", "cases deaths recovered")
def lowerCAmelCase_ ( __a = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
"""simple docstring"""
lowerCamelCase__: int ="//div[@class = \"maincounter-number\"]/span/text()"
return covid_data(*html.fromstring(requests.get(__a ).content ).xpath(__a ) )
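# The XPath collects the three "maincounter-number" spans on the Worldometers page, matching the cases/deaths/recovered fields of the namedtuple.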
__A = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
"""simple docstring"""
__SCREAMING_SNAKE_CASE =range(2, 20 + 1)
__SCREAMING_SNAKE_CASE =[10**k for k in range(ks[-1] + 1)]
__SCREAMING_SNAKE_CASE ={}
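# The memo appears to cache earlier jumps keyed by digitsum(b) and then by c; each cached entry is a (diff, terms_skipped, k) tuple (see the insert below).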
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase_ : int = sum(a_i[j] for j in range(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) ) )
lowercase_ : Tuple = sum(a_i[j] * base[j] for j in range(min(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) ) )
lowercase_ , lowercase_ : str = 0, 0
lowercase_ : Optional[int] = n - i
lowercase_ : Any = memo.get(__SCREAMING_SNAKE_CASE )
if sub_memo is not None:
lowercase_ : List[str] = sub_memo.get(__SCREAMING_SNAKE_CASE )
if jumps is not None and len(__SCREAMING_SNAKE_CASE ) > 0:
# find and make the largest jump without going over
lowercase_ : Optional[Any] = -1
for _k in range(len(__SCREAMING_SNAKE_CASE ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowercase_ : List[str] = _k
break
if max_jump >= 0:
lowercase_ , lowercase_ , lowercase_ : List[str] = jumps[max_jump]
# since the difference between jumps is cached, add c
lowercase_ : List[Any] = diff + c
for j in range(min(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) ) ):
lowercase_ , lowercase_ : Optional[int] = divmod(__SCREAMING_SNAKE_CASE , 10 )
if new_c > 0:
add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
lowercase_ : Dict = []
else:
lowercase_ : List[Any] = {c: []}
lowercase_ : Optional[Any] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowercase_ , lowercase_ : Union[str, Any] = next_term(__SCREAMING_SNAKE_CASE , k - 1 , i + dn , __SCREAMING_SNAKE_CASE )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowercase_ , lowercase_ : List[str] = compute(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , i + dn , __SCREAMING_SNAKE_CASE )
diff += _diff
dn += terms_jumped
lowercase_ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowercase_ : Union[str, Any] = 0
while j < len(__SCREAMING_SNAKE_CASE ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__SCREAMING_SNAKE_CASE , (diff, dn, k) )
return (diff, dn)
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict ):
if i >= n:
return 0, i
if k > len(__SCREAMING_SNAKE_CASE ):
a_i.extend([0 for _ in range(k - len(__SCREAMING_SNAKE_CASE ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowercase_ : str = i
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = 0, 0, 0
for j in range(len(__SCREAMING_SNAKE_CASE ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowercase_ : Tuple = ds_c + ds_b
diff += addend
lowercase_ : Tuple = 0
for j in range(__SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = a_i[j] + addend
lowercase_ , lowercase_ : List[str] = divmod(__SCREAMING_SNAKE_CASE , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return diff, i - start_i
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
for j in range(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) ):
lowercase_ : Optional[int] = digits[j] + addend
if s >= 10:
lowercase_ , lowercase_ : str = divmod(__SCREAMING_SNAKE_CASE , 10 )
lowercase_ : Optional[int] = addend // 10 + quotient
else:
lowercase_ : Optional[int] = s
lowercase_ : Any = addend // 10
if addend == 0:
break
while addend > 0:
lowercase_ , lowercase_ : str = divmod(__SCREAMING_SNAKE_CASE , 10 )
digits.append(__SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : int = 10**15 ):
lowercase_ : Dict = [1]
lowercase_ : Any = 1
lowercase_ : List[Any] = 0
while True:
lowercase_ , lowercase_ : Tuple = next_term(__SCREAMING_SNAKE_CASE , 20 , i + dn , __SCREAMING_SNAKE_CASE )
dn += terms_jumped
if dn == n - i:
break
lowercase_ : List[str] = 0
for j in range(len(__SCREAMING_SNAKE_CASE ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"{solution() = }")
'''simple docstring'''
def a ( A__ : list ) -> list:
"""simple docstring"""
_lowercase =len(A__ )
for i in range(1 , A__ ):
_lowercase =collection[i]
_lowercase =0
_lowercase =i - 1
while low <= high:
_lowercase =(low + high) // 2
if val < collection[mid]:
_lowercase =mid - 1
else:
_lowercase =mid + 1
for j in range(A__ , A__ , -1 ):
_lowercase =collection[j - 1]
_lowercase =val
return collection
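# e.g. binary_insertion_sort([5, 2, 4, 2]) -> [2, 2, 4, 5] (using the call-site name from the __main__ block below).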
if __name__ == "__main__":
lowercase_ = input('Enter numbers separated by a comma:\n').strip()
lowercase_ = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
lowercase_ = logging.getLogger(__name__)
lowercase_ = {'facebook/bart-base': BartForConditionalGeneration}
lowercase_ = {'facebook/bart-base': BartTokenizer}
def a ( ) -> Optional[Any]:
"""simple docstring"""
_lowercase =argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=A__ , default=A__ , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=A__ , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=A__ , default=A__ , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=A__ , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=A__ , )
parser.add_argument(
'--config_name' , type=A__ , default=A__ , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=A__ , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=A__ , default=A__ , help='Where to store the final ONNX file.' )
_lowercase =parser.parse_args()
return args
def a ( A__ : int , A__ : Optional[int]="cpu" ) -> Optional[int]:
"""simple docstring"""
_lowercase =model_dict[model_name].from_pretrained(A__ ).to(A__ )
_lowercase =tokenizer_dict[model_name].from_pretrained(A__ )
if model_name in ["facebook/bart-base"]:
_lowercase =0
_lowercase =None
_lowercase =0
return huggingface_model, tokenizer
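# export_and_validate_model: script the beam-search generator, export it to ONNX, deduplicate initializers, and check ONNX Runtime output against PyTorch generate().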
def a ( A__ : List[str] , A__ : Optional[Any] , A__ : List[Any] , A__ : Dict , A__ : Tuple ) -> List[str]:
"""simple docstring"""
model.eval()
_lowercase =None
_lowercase =torch.jit.script(BARTBeamSearchGenerator(A__ ) )
with torch.no_grad():
_lowercase ='My friends are cool but they eat too many carbs.'
_lowercase =tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='pt' ).to(model.device )
_lowercase =model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=A__ , max_length=A__ , early_stopping=A__ , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
A__ , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , A__ , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=A__ , )
logger.info('Model exported to {}'.format(A__ ) )
_lowercase =remove_dup_initializers(os.path.abspath(A__ ) )
logger.info('Deduplicated and optimized model written to {}'.format(A__ ) )
_lowercase =onnxruntime.InferenceSession(A__ )
_lowercase =ort_sess.run(
A__ , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(A__ ),
'max_length': np.array(A__ ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def a ( ) -> int:
"""simple docstring"""
_lowercase =parse_args()
_lowercase =5
_lowercase =4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowercase =torch.device(args.device )
_lowercase , _lowercase =load_model_tokenizer(args.model_name_or_path , A__ )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(A__ )
if args.max_length:
_lowercase =args.max_length
if args.num_beams:
_lowercase =args.num_beams
if args.output_file_path:
_lowercase =args.output_file_path
else:
_lowercase ='BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(A__ , A__ , A__ , A__ , A__ )
if __name__ == "__main__":
main()
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
SCREAMING_SNAKE_CASE_: int =['text', 'image', 'audio']
def lowerCAmelCase_ ( snake_case_ : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((5_12, 5_12) ) )
elif input_type == "audio":
inputs.append(torch.ones(30_00 ) )
elif isinstance(snake_case_ , snake_case_ ):
inputs.append(create_inputs(snake_case_ ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
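# output_types maps each tool output back to one of the authorized modalities ("text", "image" or "audio").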
def lowerCAmelCase_ ( snake_case_ : List ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = []
for output in outputs:
if isinstance(snake_case_ , (str, AgentText) ):
output_types.append("text" )
elif isinstance(snake_case_ , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(snake_case_ , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class __A :
def _lowercase (self : Optional[Any] ):
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
UpperCAmelCase_ = self.tool.inputs
for _input in inputs:
if isinstance(_input , __a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
UpperCAmelCase_ = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = create_inputs(self.tool.inputs )
UpperCAmelCase_ = self.tool(*__a )
# There is a single output
if len(self.tool.outputs ) == 1:
UpperCAmelCase_ = [outputs]
self.assertListEqual(output_types(__a ) , self.tool.outputs )
def _lowercase (self : str ):
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = create_inputs(self.tool.inputs )
UpperCAmelCase_ = self.tool(*__a )
if not isinstance(__a , __a ):
UpperCAmelCase_ = [outputs]
self.assertEqual(len(__a ) , len(self.tool.outputs ) )
for output, output_type in zip(__a , self.tool.outputs ):
UpperCAmelCase_ = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(__a , __a ) )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = create_inputs(self.tool.inputs )
UpperCAmelCase_ = []
for _input, input_type in zip(__a , self.tool.inputs ):
if isinstance(__a , __a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
UpperCAmelCase_ = self.tool(*__a )
if not isinstance(__a , __a ):
UpperCAmelCase_ = [outputs]
self.assertEqual(len(__a ) , len(self.tool.outputs ) )
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __A ( UpperCamelCase__ ):
a__ : List[str] = """Salesforce/blip-image-captioning-base"""
a__ : Optional[Any] = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
a__ : str = """image_captioner"""
a__ : List[str] = AutoModelForVisionaSeq
a__ : int = ["""image"""]
a__ : Optional[Any] = ["""text"""]
def __init__(self : Any , *__a : Dict , **__a : Union[str, Any] ):
requires_backends(self , ["vision"] )
super().__init__(*__a , **__a )
def _lowercase (self : Union[str, Any] , __a : "Image" ):
return self.pre_processor(images=__a , return_tensors="pt" )
def _lowercase (self : List[str] , __a : Dict ):
return self.model.generate(**__a )
def _lowercase (self : int , __a : Optional[Any] ):
return self.pre_processor.batch_decode(__a , skip_special_tokens=__a )[0].strip()
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str=5 ):
'''simple docstring'''
assert masked_input.count('<mask>' ) == 1
lowercase__ : Tuple = torch.tensor(tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) ).unsqueeze(0 ) # Batch size 1
lowercase__ : str = model(_lowerCAmelCase )[0] # The last hidden-state is the first element of the output tuple
lowercase__ : List[Any] = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
lowercase__ : Optional[int] = logits[0, masked_index, :]
lowercase__ : Dict = logits.softmax(dim=0 )
lowercase__ , lowercase__ : Optional[Any] = prob.topk(k=_lowerCAmelCase , dim=0 )
lowercase__ : Dict = ' '.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(_lowerCAmelCase ) )] )
lowercase__ : int = tokenizer.mask_token
lowercase__ : Union[str, Any] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ):
lowercase__ : List[str] = predicted_token_bpe.replace('\u2581' , ' ' )
if " {0}".format(_lowerCAmelCase ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(' {0}'.format(_lowerCAmelCase ) , _lowerCAmelCase ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(_lowerCAmelCase , _lowerCAmelCase ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
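# Each returned triple is (input with <mask> filled in, softmax probability of the prediction, predicted token).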
_UpperCamelCase : Tuple = CamembertTokenizer.from_pretrained("camembert-base")
_UpperCamelCase : List[Any] = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
_UpperCamelCase : Tuple = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Tuple = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
lowercase__ : str = model(a )
lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(a ):
lowercase__ : Dict = ['stem']
lowercase__ : List[str] = MaskFormerSwinBackbone(config=a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = MaskFormerSwinModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> str:
return
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple:
lowercase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swin has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a ):
lowercase__ : Union[str, Any] = 0
return t
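# check_equivalence: run the model in dict- and tuple-output modes and recursively compare the two structures (NaNs zeroed before torch.allclose).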
def check_equivalence(a , a , a , a={} ):
with torch.no_grad():
lowercase__ : Optional[Any] = model(**a , return_dict=a , **a )
lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple()
def recursive_check(a , a ):
if isinstance(a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a , a ):
recursive_check(a , a )
elif isinstance(a , a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a , a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has"""
f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}."""
) , )
recursive_check(a , a )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
model.to(a )
model.eval()
lowercase__ : Tuple = self._prepare_for_class(a , a )
lowercase__ : Optional[Any] = self._prepare_for_class(a , a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a )
lowercase__ : int = self._prepare_for_class(a , a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _a):
lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ : Optional[Any] = backbone_class(a )
backbone.to(a )
backbone.eval()
lowercase__ : Union[str, Any] = backbone(**a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ : List[str] = backbone(**a , output_hidden_states=a )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ : List[Any] = backbone(**a , output_attentions=a )
self.assertIsNotNone(outputs.attentions )
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
UpperCAmelCase : Any = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCamelCase ( _UpperCamelCase : str ) -> List[Any]:
'''simple docstring'''
if isinstance(_UpperCamelCase , torch.Tensor ):
return image
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
__UpperCAmelCase : List[Any] = [image]
__UpperCAmelCase : Optional[int] = [trans(img.convert("""RGB""" ) ) for img in image]
__UpperCAmelCase : List[Any] = torch.stack(_UpperCamelCase )
return image
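# preprocess accepts a tensor, a single PIL image, or a list of PIL images and returns a normalised (batch, 3, 256, 256) tensor.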
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
__UpperCAmelCase : int = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCamelCase , scheduler=UpperCamelCase )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should be in [0.0, 1.0] but is {strength}''' )
def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : Dict ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = min(int(num_inference_steps * strength ) , UpperCamelCase )
__UpperCAmelCase : Dict = max(num_inference_steps - init_timestep , 0 )
__UpperCAmelCase : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[int]=None ):
'''simple docstring'''
if not isinstance(UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCamelCase )}''' )
__UpperCAmelCase : Optional[Any] = image.to(device=UpperCamelCase , dtype=UpperCamelCase )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(UpperCamelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__UpperCAmelCase : List[Any] = init_latents.shape
__UpperCAmelCase : Optional[Any] = randn_tensor(UpperCamelCase , generator=UpperCamelCase , device=UpperCamelCase , dtype=UpperCamelCase )
# get latents
print("""add noise to latents at timestep""" , UpperCamelCase )
__UpperCAmelCase : Any = self.scheduler.add_noise(UpperCamelCase , UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = init_latents
return latents
@torch.no_grad()
def __call__( self : Dict , UpperCamelCase : Union[torch.FloatTensor, PIL.Image.Image] = None , UpperCamelCase : float = 0.8 , UpperCamelCase : int = 1 , UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase : float = 0.0 , UpperCamelCase : int = 50 , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[str] = "pil" , UpperCamelCase : bool = True , ):
'''simple docstring'''
self.check_inputs(UpperCamelCase )
# 2. Preprocess image
__UpperCAmelCase : int = preprocess(UpperCamelCase )
# 3. set timesteps
self.scheduler.set_timesteps(UpperCamelCase , device=self.device )
__UpperCAmelCase ,__UpperCAmelCase : Dict = self.get_timesteps(UpperCamelCase , UpperCamelCase , self.device )
__UpperCAmelCase : int = timesteps[:1].repeat(UpperCamelCase )
# 4. Prepare latent variables
__UpperCAmelCase : Optional[Any] = self.prepare_latents(UpperCamelCase , UpperCamelCase , UpperCamelCase , self.unet.dtype , self.device , UpperCamelCase )
__UpperCAmelCase : Optional[int] = latents
# 5. Denoising loop
for t in self.progress_bar(UpperCamelCase ):
# 1. predict noise model_output
__UpperCAmelCase : Optional[Any] = self.unet(UpperCamelCase , UpperCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__UpperCAmelCase : Optional[Any] = self.scheduler.step(
UpperCamelCase , UpperCamelCase , UpperCamelCase , eta=UpperCamelCase , use_clipped_model_output=UpperCamelCase , generator=UpperCamelCase , ).prev_sample
__UpperCAmelCase : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
__UpperCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCAmelCase : Optional[Any] = self.numpy_to_pil(UpperCamelCase )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=UpperCamelCase )
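# Standalone sketch of the strength-to-timestep arithmetic in `get_timesteps`
# above; the step count and strength values are illustrative assumptions.
num_inference_steps = 50
strength = 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)  # 10
# The denoising loop then runs over scheduler.timesteps[10:], so a higher
# strength re-noises the input image more heavily before denoising it.
print(init_timestep, t_start)  # 40 10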
| 139
|
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
__a = LEDTokenizer
__a = LEDTokenizerFast
__a = True
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
super().setUp()
__UpperCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__UpperCAmelCase : int = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
__UpperCAmelCase : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase ) )
def lowerCamelCase__ ( self : Dict , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def lowerCamelCase__ ( self : int , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def lowerCamelCase__ ( self : Any , UpperCamelCase : Tuple ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : str = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : Any = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : int = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__UpperCAmelCase : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@require_torch
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : int = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[Any] = tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors="""pt""" )
self.assertIn("""input_ids""" , UpperCamelCase )
self.assertIn("""attention_mask""" , UpperCamelCase )
self.assertNotIn("""labels""" , UpperCamelCase )
self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase )
@require_torch
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : str = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[Any] = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 5_122) )
@require_torch
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = ["""A long paragraph for summarization."""]
__UpperCAmelCase : Optional[int] = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : List[Any] = tokenizer(UpperCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : str = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : str = inputs["""input_ids"""]
__UpperCAmelCase : Any = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""]
__UpperCAmelCase : str = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__UpperCAmelCase : str = tokenizer(UpperCamelCase , padding=UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]]
__UpperCAmelCase : Any = tokenizer.pad(UpperCamelCase )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence."""
__UpperCAmelCase : Union[str, Any] = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__UpperCAmelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
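# Sketch of the global-attention convention the padding test above relies on
# (token ids below are placeholders, not the fixture's): LED takes a
# `global_attention_mask` parallel to `input_ids`, with 1 marking tokens that
# attend globally, and `tokenizer.pad` is expected to pad it with 0s.
input_ids = [[0, 11, 12, 2], [0, 21, 2]]
global_attention_mask = [[1, 0, 0, 0], [1, 0, 0]]  # global attention on <s>

max_len = max(len(ids) for ids in input_ids)
padded_global = [mask + [0] * (max_len - len(mask)) for mask in global_attention_mask]
print(padded_global)  # [[1, 0, 0, 0], [1, 0, 0, 0]]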
| 139
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowerCamelCase__ = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
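# A minimal sketch of the lazy-import idea behind `_LazyModule`, written with
# PEP 562 module-level `__getattr__`. This illustrates the pattern only; it is
# not the transformers implementation, and the mapping below is an assumed example.
import importlib

_lazy_structure = {"math": ["sqrt"], "json": ["dumps"]}  # submodule -> public names

def __getattr__(name):
    for module_name, names in _lazy_structure.items():
        if name in names:
            # import the submodule only when one of its names is first accessed
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")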
| 40
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ): # noqa: E741
_UpperCAmelCase : List[str] = len(__lowerCAmelCase )
_UpperCAmelCase : str = 0
_UpperCAmelCase : List[str] = [0] * n
_UpperCAmelCase : int = [False] * n
_UpperCAmelCase : Dict = [False] * n
def dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if parent == root:
out_edge_count += 1
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : str = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_UpperCAmelCase : List[str] = dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Tuple = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
_UpperCAmelCase : Dict = True
# AP found via cycle
if at == low[to]:
_UpperCAmelCase : Dict = True
else:
_UpperCAmelCase : Optional[int] = min(low[at] , __lowerCAmelCase )
return out_edge_count
for i in range(__lowerCAmelCase ):
if not visited[i]:
_UpperCAmelCase : str = 0
_UpperCAmelCase : Tuple = dfs(__lowerCAmelCase , __lowerCAmelCase , -1 , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = out_edge_count > 1
for x in range(len(__lowerCAmelCase ) ):
if is_art[x] is True:
print(__lowerCAmelCase )
# Adjacency list of graph
lowerCamelCase__ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
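# Because the bindings in the row above are obfuscated, here is a runnable
# reference sketch of the same articulation-point DFS (Tarjan-style). The
# names and the low-link bookkeeping are my own, not taken from the source.
def compute_articulation_points(graph):
    n = len(graph)
    ids = [0] * n          # discovery order of each vertex
    low = [0] * n          # lowest discovery id reachable from the subtree
    visited = [False] * n
    is_art = [False] * n
    timer = 0

    def dfs(at, parent):
        nonlocal timer
        visited[at] = True
        timer += 1
        ids[at] = low[at] = timer
        children = 0
        for to in graph[at]:
            if to == parent:
                continue
            if not visited[to]:
                children += 1
                dfs(to, at)
                low[at] = min(low[at], low[to])
                # non-root vertex whose child cannot reach above it
                if parent != -1 and low[to] >= ids[at]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], ids[to])
        if parent == -1 and children > 1:  # root with several DFS subtrees
            is_art[at] = True

    for v in range(n):
        if not visited[v]:
            dfs(v, -1)
    return [v for v in range(n) if is_art[v]]

# For the adjacency list above this prints [2, 3, 5].
print(compute_articulation_points({0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}))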
| 40
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__lowerCamelCase :List[str] = logging.get_logger(__name__)
def snake_case ( UpperCamelCase__ : str ) -> List[List[ImageInput]]:
if isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(UpperCamelCase__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(UpperCamelCase__ ):
return [[videos]]
raise ValueError(F'Could not make batched video from {videos}' )
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =['''pixel_values''']
def __init__( self: List[str] , __a: bool = True , __a: Dict[str, int] = None , __a: PILImageResampling = PILImageResampling.BILINEAR , __a: bool = True , __a: Dict[str, int] = None , __a: bool = True , __a: Union[int, float] = 1 / 255 , __a: bool = True , __a: bool = True , __a: Optional[Union[float, List[float]]] = None , __a: Optional[Union[float, List[float]]] = None , **__a: int , )-> None:
super().__init__(**__a )
lowerCamelCase : int = size if size is not None else {"""shortest_edge""": 256}
lowerCamelCase : Tuple = get_size_dict(__a , default_to_square=__a )
lowerCamelCase : Optional[int] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowerCamelCase : Tuple = get_size_dict(__a , param_name="""crop_size""" )
lowerCamelCase : str = do_resize
lowerCamelCase : List[str] = size
lowerCamelCase : Optional[Any] = do_center_crop
lowerCamelCase : List[Any] = crop_size
lowerCamelCase : Dict = resample
lowerCamelCase : Tuple = do_rescale
lowerCamelCase : List[str] = rescale_factor
lowerCamelCase : str = offset
lowerCamelCase : Union[str, Any] = do_normalize
lowerCamelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ ( self: List[str] , __a: np.ndarray , __a: Dict[str, int] , __a: PILImageResampling = PILImageResampling.BILINEAR , __a: Optional[Union[str, ChannelDimension]] = None , **__a: List[str] , )-> np.ndarray:
lowerCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" in size:
lowerCamelCase : Union[str, Any] = get_resize_output_image_size(__a , size["""shortest_edge"""] , default_to_square=__a )
elif "height" in size and "width" in size:
lowerCamelCase : Optional[int] = (size["""height"""], size["""width"""])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def a__ ( self: Optional[int] , __a: np.ndarray , __a: Dict[str, int] , __a: Optional[Union[str, ChannelDimension]] = None , **__a: List[Any] , )-> np.ndarray:
lowerCamelCase : Optional[Any] = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(__a , size=(size["""height"""], size["""width"""]) , data_format=__a , **__a )
def a__ ( self: Dict , __a: np.ndarray , __a: Union[int, float] , __a: bool = True , __a: Optional[Union[str, ChannelDimension]] = None , **__a: Optional[Any] , )-> Union[str, Any]:
lowerCamelCase : Tuple = image.astype(np.floataa )
if offset:
lowerCamelCase : str = image - (scale / 2)
return rescale(__a , scale=__a , data_format=__a , **__a )
def a__ ( self: Any , __a: np.ndarray , __a: Union[float, List[float]] , __a: Union[float, List[float]] , __a: Optional[Union[str, ChannelDimension]] = None , **__a: List[str] , )-> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def a__ ( self: Union[str, Any] , __a: ImageInput , __a: bool = None , __a: Dict[str, int] = None , __a: PILImageResampling = None , __a: bool = None , __a: Dict[str, int] = None , __a: bool = None , __a: float = None , __a: bool = None , __a: bool = None , __a: Optional[Union[float, List[float]]] = None , __a: Optional[Union[float, List[float]]] = None , __a: Optional[ChannelDimension] = ChannelDimension.FIRST , )-> np.ndarray:
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : int = to_numpy_array(__a )
if do_resize:
lowerCamelCase : Union[str, Any] = self.resize(image=__a , size=__a , resample=__a )
if do_center_crop:
lowerCamelCase : List[str] = self.center_crop(__a , size=__a )
if do_rescale:
lowerCamelCase : int = self.rescale(image=__a , scale=__a , offset=__a )
if do_normalize:
lowerCamelCase : List[Any] = self.normalize(image=__a , mean=__a , std=__a )
lowerCamelCase : Optional[int] = to_channel_dimension_format(__a , __a )
return image
def a__ ( self: Optional[Any] , __a: ImageInput , __a: bool = None , __a: Dict[str, int] = None , __a: PILImageResampling = None , __a: bool = None , __a: Dict[str, int] = None , __a: bool = None , __a: float = None , __a: bool = None , __a: bool = None , __a: Optional[Union[float, List[float]]] = None , __a: Optional[Union[float, List[float]]] = None , __a: Optional[Union[str, TensorType]] = None , __a: ChannelDimension = ChannelDimension.FIRST , **__a: Dict , )-> PIL.Image.Image:
lowerCamelCase : List[Any] = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : Optional[Any] = resample if resample is not None else self.resample
lowerCamelCase : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Any = offset if offset is not None else self.offset
lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Optional[int] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Optional[int] = size if size is not None else self.size
lowerCamelCase : List[str] = get_size_dict(__a , default_to_square=__a )
lowerCamelCase : Dict = crop_size if crop_size is not None else self.crop_size
lowerCamelCase : int = get_size_dict(__a , param_name="""crop_size""" )
if not valid_images(__a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
lowerCamelCase : int = make_batched(__a )
lowerCamelCase : Union[str, Any] = [
[
self._preprocess_image(
image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
for img in video
]
for video in videos
]
lowerCamelCase : List[Any] = {"""pixel_values""": videos}
return BatchFeature(data=__a , tensor_type=__a )
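# Sketch of the normalization video processors of this kind commonly target
# with `do_rescale` plus `offset`: mapping uint8 pixels from [0, 255] to
# [-1, 1]. The exact scale below (2/255) is an assumption for illustration.
import numpy as np

pixels = np.array([0, 127.5, 255], dtype=np.float32)
normalized = pixels * (2 / 255) - 1
print(normalized)  # [-1.  0.  1.]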
| 222
|
"""simple docstring"""
import math
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float:
# handle negative values of initial intensity
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handle angle values outside the allowed 0-360 degree range
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
return initial_intensity * (math.cos(math.radians(UpperCamelCase__ ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
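# Standalone check of Malus's law, I = I0 * cos^2(theta), with illustrative
# numbers: cos(60 degrees) = 0.5, so a polarizer at 60 degrees passes 25%.
import math

initial_intensity, angle = 100.0, 60.0
transmitted = initial_intensity * math.cos(math.radians(angle)) ** 2
print(round(transmitted, 6))  # 25.0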
| 222
| 1
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__A =get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__A =5_0_0_0_3
__A =5_0_0_0_2
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( __UpperCamelCase ,unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PLBartTokenizer
UpperCamelCase = None
UpperCamelCase = False
def snake_case__ ( self : Any ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : Union[str, Any] = PLBartTokenizer(a_ , language_codes='''base''' , keep_accents=a_ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = PLBartTokenizer(a_ , language_codes='''base''' , keep_accents=a_ )
__UpperCAmelCase : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__UpperCAmelCase : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__UpperCAmelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__UpperCAmelCase : str = tokenizer.vocab_size
__UpperCAmelCase : Optional[int] = [tokenizer.convert_ids_to_tokens(a_ ) for x in range(end - 4 , a_ )]
self.assertListEqual(a_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
__UpperCAmelCase : Optional[int] = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__UpperCAmelCase : Tuple = tokenizer(a_ ).input_ids
self.assertEqual(
tokenizer.decode(a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ ) , a_ , )
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Any = PLBartTokenizer(a_ , language_codes='''multi''' , keep_accents=a_ )
__UpperCAmelCase : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__UpperCAmelCase : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__UpperCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__UpperCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__UpperCAmelCase : List[str] = tokenizer.vocab_size
__UpperCAmelCase : List[str] = [tokenizer.convert_ids_to_tokens(a_ ) for x in range(end - 7 , a_ )]
self.assertListEqual(
a_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
__UpperCAmelCase : Any = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__UpperCAmelCase : List[str] = tokenizer(a_ ).input_ids
self.assertEqual(
tokenizer.decode(a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ ) , a_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = """uclanlp/plbart-python-en_XX"""
UpperCamelCase = [
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
UpperCamelCase = [
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
UpperCamelCase = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def snake_case__ ( cls : List[str] ):
'''simple docstring'''
__UpperCAmelCase : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
__UpperCAmelCase : Optional[Any] = 1
return cls
def snake_case__ ( self : str ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 5_00_03 )
def snake_case__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a_ )
def snake_case__ ( self : List[str] ):
'''simple docstring'''
self.assertIn(a_ , self.tokenizer.all_special_ids )
__UpperCAmelCase : Optional[int] = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
__UpperCAmelCase : Tuple = self.tokenizer.decode(a_ , skip_special_tokens=a_ )
__UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a_ )
self.assertEqual(a_ , a_ )
self.assertNotIn(self.tokenizer.eos_token , a_ )
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : str = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , a_ )
__UpperCAmelCase : Dict = 10
__UpperCAmelCase : Any = self.tokenizer(a_ , max_length=a_ , truncation=a_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , a_ )
self.assertEqual(len(a_ ) , a_ )
def snake_case__ ( self : List[str] ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [5_00_04, 5_00_01] )
def snake_case__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
__UpperCAmelCase : List[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a_ )
__UpperCAmelCase : Dict = PLBartTokenizer.from_pretrained(a_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a_ )
@require_torch
def snake_case__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a_ , return_tensors='''pt''' )
__UpperCAmelCase : Tuple = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , a_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def snake_case__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=a_ , truncation=a_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__UpperCAmelCase : int = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
__UpperCAmelCase : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , a_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = self.tokenizer(self.src_text , padding=a_ , truncation=a_ , max_length=3 , return_tensors='''pt''' )
__UpperCAmelCase : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=a_ , truncation=a_ , max_length=10 , return_tensors='''pt''' )
__UpperCAmelCase : Tuple = targets['''input_ids''']
__UpperCAmelCase : List[str] = shift_tokens_right(a_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def snake_case__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : int = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(a_ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[1_50, 2_42, 2, 5_00_03]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 5_00_01,
} , )
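# Sketch of the sequence layout the PLBart assertions above check (the ids are
# the fixture's constants; padding is ignored here for simplicity):
#   source ids: tokens + [eos, src_lang_code]
#   labels:     tokens + [eos, tgt_lang_code]
# `shift_tokens_right` then rotates the trailing language code to the front so
# it becomes the first decoder token.
EOS, EN_CODE = 2, 50003
labels = [9037, 33442, 57, EOS, EN_CODE]
decoder_input_ids = [labels[-1]] + labels[:-1]
print(decoder_input_ids)  # [50003, 9037, 33442, 57, 2]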
| 241
|
def a ( _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 10_00 ):
'''simple docstring'''
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase : Dict = 0
for divide_by_number in range(_UpperCAmelCase , digit + 1 ):
__UpperCAmelCase : list[int] = []
__UpperCAmelCase : List[str] = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(_UpperCAmelCase ):
__UpperCAmelCase : List[str] = len(_UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = divide_by_number
else:
has_been_divided.append(_UpperCAmelCase )
__UpperCAmelCase : Any = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
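# The routine above walks the remainders of long division; here is a compact
# sketch of the same idea, computing the recurring-cycle length of 1/d
# (my own helper for illustration, not the function above):
def cycle_length(d):
    seen = {}
    remainder, position = 1 % d, 0
    while remainder and remainder not in seen:
        seen[remainder] = position
        remainder = remainder * 10 % d  # same remainder update as the source
        position += 1
    return position - seen[remainder] if remainder else 0

print(cycle_length(7))  # 6, since 1/7 = 0.(142857)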
| 241
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : int = {'''vocab_file''': '''sentencepiece.bpe.model'''}
A__ : int = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
A__ : List[str] = {
'''camembert-base''': 5_1_2,
}
A__ : str = '''▁'''
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , __a : int , __a : str="<s>" , __a : Dict="</s>" , __a : Union[str, Any]="</s>" , __a : List[str]="<s>" , __a : Optional[int]="<unk>" , __a : Optional[Any]="<pad>" , __a : str="<mask>" , __a : Union[str, Any]=["<s>NOTUSED", "</s>NOTUSED"] , __a : Optional[Dict[str, Any]] = None , **__a : Any , ) -> None:
'''simple docstring'''
# Mask token behaves like a normal word, i.e. it includes the space before it
__snake_case : Optional[int] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
__snake_case : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , additional_special_tokens=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
__snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
__snake_case : str = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>).
__snake_case : Any = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
__snake_case : Optional[int] = len(self.fairseq_tokens_to_ids )
__snake_case : str = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
__snake_case : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def A_ ( self : List[str] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case : Dict = [self.cls_token_id]
__snake_case : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A_ ( self : Optional[Any] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def A_ ( self : int , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
__snake_case : Dict = [self.sep_token_id]
__snake_case : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A_ ( self : Dict ) -> List[str]:
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def A_ ( self : List[str] ) -> str:
'''simple docstring'''
__snake_case : Any = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A_ ( self : Dict , __a : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__a , out_type=__a )
def A_ ( self : Optional[Any] , __a : List[str] ) -> List[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(__a ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(__a )
def A_ ( self : Optional[int] , __a : List[Any] ) -> Optional[Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A_ ( self : Dict , __a : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = []
__snake_case : List[Any] = ''
__snake_case : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
__snake_case : Optional[int] = True
__snake_case : Any = []
else:
current_sub_tokens.append(__a )
__snake_case : Dict = False
out_string += self.sp_model.decode(__a )
return out_string.strip()
def __getstate__( self : str ) -> Optional[int]:
'''simple docstring'''
__snake_case : Dict = self.__dict__.copy()
__snake_case : int = None
return state
def __setstate__( self : List[str] , __a : List[str] ) -> Dict:
'''simple docstring'''
__snake_case : List[str] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Dict = {}
__snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A_ ( self : Tuple , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case : Dict = os.path.join(
__a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , 'wb' ) as fi:
__snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
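# Sketch of the id layout the tokenizer above maintains: four reserved fairseq
# slots come first, and every regular sentencepiece id is shifted past them.
fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
fairseq_offset = len(fairseq_tokens_to_ids)  # 4

def sp_id_to_final_id(sp_piece_id):
    # sentencepiece's own unk (piece id 0) is mapped separately to unk_token_id
    return fairseq_offset + sp_piece_id

print(sp_id_to_final_id(10))  # 14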
| 286
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Optional[Any] = logging.get_logger(__name__)
A__ : List[str] = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = '''cvt'''
def __init__( self : Optional[Any] , __a : Union[str, Any]=3 , __a : List[Any]=[7, 3, 3] , __a : Optional[int]=[4, 2, 2] , __a : Dict=[2, 1, 1] , __a : Union[str, Any]=[64, 192, 384] , __a : int=[1, 3, 6] , __a : List[str]=[1, 2, 10] , __a : Optional[Any]=[4.0, 4.0, 4.0] , __a : Any=[0.0, 0.0, 0.0] , __a : List[str]=[0.0, 0.0, 0.0] , __a : List[Any]=[0.0, 0.0, 0.1] , __a : List[str]=[True, True, True] , __a : int=[False, False, True] , __a : Dict=["dw_bn", "dw_bn", "dw_bn"] , __a : List[str]=[3, 3, 3] , __a : Union[str, Any]=[1, 1, 1] , __a : Optional[int]=[2, 2, 2] , __a : Optional[Any]=[1, 1, 1] , __a : List[str]=[1, 1, 1] , __a : List[str]=0.0_2 , __a : List[str]=1e-12 , **__a : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(**__a )
__snake_case : int = num_channels
__snake_case : Union[str, Any] = patch_sizes
__snake_case : Any = patch_stride
__snake_case : List[str] = patch_padding
__snake_case : Optional[Any] = embed_dim
__snake_case : Union[str, Any] = num_heads
__snake_case : Dict = depth
__snake_case : Optional[Any] = mlp_ratio
__snake_case : List[str] = attention_drop_rate
__snake_case : Optional[int] = drop_rate
__snake_case : Optional[int] = drop_path_rate
__snake_case : Any = qkv_bias
__snake_case : int = cls_token
__snake_case : Optional[int] = qkv_projection_method
__snake_case : List[Any] = kernel_qkv
__snake_case : List[Any] = padding_kv
__snake_case : int = stride_kv
__snake_case : List[str] = padding_q
__snake_case : Dict = stride_q
__snake_case : Tuple = initializer_range
__snake_case : List[Any] = layer_norm_eps
| 286
| 1
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = val
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
SCREAMING_SNAKE_CASE = key.replace('backbone.0.body', 'backbone.conv_encoder.model' )
SCREAMING_SNAKE_CASE = value
else:
SCREAMING_SNAKE_CASE = value
return new_state_dict
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = ''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
SCREAMING_SNAKE_CASE = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[:2_5_6, :]
SCREAMING_SNAKE_CASE = in_proj_bias[:2_5_6]
SCREAMING_SNAKE_CASE = in_proj_weight[2_5_6:5_1_2, :]
SCREAMING_SNAKE_CASE = in_proj_bias[2_5_6:5_1_2]
SCREAMING_SNAKE_CASE = in_proj_weight[-2_5_6:, :]
SCREAMING_SNAKE_CASE = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
SCREAMING_SNAKE_CASE = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
SCREAMING_SNAKE_CASE = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[:2_5_6, :]
SCREAMING_SNAKE_CASE = in_proj_bias[:2_5_6]
SCREAMING_SNAKE_CASE = in_proj_weight[2_5_6:5_1_2, :]
SCREAMING_SNAKE_CASE = in_proj_bias[2_5_6:5_1_2]
SCREAMING_SNAKE_CASE = in_proj_weight[-2_5_6:, :]
SCREAMING_SNAKE_CASE = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
SCREAMING_SNAKE_CASE = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
SCREAMING_SNAKE_CASE = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[:2_5_6, :]
SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[:2_5_6]
SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[2_5_6:5_1_2]
SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[-2_5_6:, :]
SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[-2_5_6:]
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.size
SCREAMING_SNAKE_CASE = max(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = 8_0_0 if 'detection' in checkpoint_url else 1_0_0_0
SCREAMING_SNAKE_CASE = target_max_size / current_max_size
SCREAMING_SNAKE_CASE = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = F.to_tensor(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = F.normalize(SCREAMING_SNAKE_CASE_, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
logger.info('Converting model...' )
# load original state dict
SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_, map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = rename_backbone_keys(SCREAMING_SNAKE_CASE_ )
# query, key and value matrices need special treatment
read_in_q_k_v(SCREAMING_SNAKE_CASE_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE = 'model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = val
# create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE = TableTransformerConfig(
backbone='resnet18', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
if "detection" in checkpoint_url:
SCREAMING_SNAKE_CASE = 1_5
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = {0: 'table', 1: 'table rotated'}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
else:
SCREAMING_SNAKE_CASE = 1_2_5
SCREAMING_SNAKE_CASE = 6
SCREAMING_SNAKE_CASE = {
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = DetrImageProcessor(
format='coco_detection', max_size=8_0_0 if 'detection' in checkpoint_url else 1_0_0_0 )
SCREAMING_SNAKE_CASE = TableTransformerForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
model.eval()
# verify our conversion
SCREAMING_SNAKE_CASE = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
SCREAMING_SNAKE_CASE = hf_hub_download(repo_id='nielsr/example-pdf', repo_type='dataset', filename=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = Image.open(SCREAMING_SNAKE_CASE_ ).convert('RGB' )
SCREAMING_SNAKE_CASE = normalize(resize(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) ).unsqueeze(0 )
SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ )
if "detection" in checkpoint_url:
SCREAMING_SNAKE_CASE = (1, 1_5, 3)
SCREAMING_SNAKE_CASE = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
SCREAMING_SNAKE_CASE = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
SCREAMING_SNAKE_CASE = (1, 1_2_5, 7)
SCREAMING_SNAKE_CASE = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
SCREAMING_SNAKE_CASE = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
SCREAMING_SNAKE_CASE = (
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(SCREAMING_SNAKE_CASE_ )
image_processor.push_to_hub(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
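# Sketch of the q/k/v split that `read_in_q_k_v` above performs: PyTorch's
# nn.MultiheadAttention stores one fused (3*d, d) `in_proj_weight`, which is
# sliced into query, key and value projections (d = 256 in this checkpoint).
import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)
in_proj_bias = torch.randn(3 * d)
q_w, k_w, v_w = in_proj_weight[:d], in_proj_weight[d : 2 * d], in_proj_weight[-d:]
q_b, k_b, v_b = in_proj_bias[:d], in_proj_bias[d : 2 * d], in_proj_bias[-d:]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)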
| 406
|
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
snake_case = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
snake_case = 5
snake_case = 1_0
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : str = SpeechaTextTokenizer
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Optional[int] = True
    def setUp(self):
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ['<s>', '<pad>', '</s>', '<unk>']
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES['spm_file']).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['spm_file'])
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 1001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'], )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE = {'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE, model_name='facebook/s2t-small-mustc-en-de-st', revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad', )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = 'valhalla/s2t_mustc_multilinguial_medium'
    french_text = "C'est trop cool"
    spanish_text = 'Esto es genial'

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 )
    def test_vocab_size(self):
self.assertEqual(self.tokenizer.vocab_size , 10000 )
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)
    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
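# Minimal sketch of the multilingual behaviour exercised above (assumes network access
# to the `valhalla/s2t_mustc_multilinguial_medium` checkpoint):
#
#   tokenizer = SpeechaTextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
#   tokenizer.tgt_lang = "fr"                       # choose the target language
#   ids = tokenizer("C'est trop cool").input_ids    # [FR_CODE, ..., eos_token_id]
#   assert ids[0] == tokenizer.lang_code_to_id["fr"]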
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError('You have to specify either text, visual prompt or images.')
        if text is not None and visual_prompt is not None:
            raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None and images is not None:
            encoding = {
                'pixel_values': image_features.pixel_values,
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
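# Usage sketch for the processor above (an assumption-laden example, not part of the
# original file: it presumes a CLIP tokenizer checkpoint is available and `pil_image`
# is any PIL.Image):
#
#   from transformers import CLIPTokenizer, ViTImageProcessor
#   processor = CLIPSegProcessor(
#       image_processor=ViTImageProcessor(),
#       tokenizer=CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32"))
#   batch = processor(text=["a cat"], images=pil_image)
#   # -> BatchEncoding with `input_ids`, `attention_mask` and `pixel_values`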
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = 'openai-gpt'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self, vocab_size=40_478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
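# Quick sanity check of the attribute_map above: the generic config names resolve to
# the GPT-specific fields without any download (a sketch, not part of the original file):
#
#   config = OpenAIGPTConfig(n_embd=64, n_layer=2, n_head=2)
#   assert config.hidden_size == 64          # alias of n_embd
#   assert config.num_hidden_layers == 2     # alias of n_layer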
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
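# Illustrative check of the helper above (a hedged sketch, not part of the original
# file): the returned linear layer shares the embedding's storage, so an LM head built
# this way stays tied to the token embeddings without copying any weights.
#
#   emb = nn.Embedding(10, 4)
#   lm_head = make_linear_from_emb(emb)
#   assert lm_head.weight.data_ptr() == emb.weight.data_ptr()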
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ):
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
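# Illustrative CLI invocation for the converter above (the script filename and local
# paths are placeholders, not part of the original file):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path ./xls_r_mbart50.pt \
#       --dict_path ./mbart50/dict.txt \
#       --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50
#
# The remaining flags fall back to the defaults above (XLS-R 1B encoder config,
# mBART-50 decoder config, a 2-stride / 3-kernel adapter).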
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
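# Worked example for the Jacobi solver above (a sketch; the three-iteration values were
# computed by hand for this strictly diagonally dominant system):
#   4x +  y +  z =  2
#    x + 5y + 2z = -6
#    x + 2y + 4z = -4
#
#   coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#   constant = np.array([[2.0], [-6.0], [-4.0]])
#   jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3)
#   # -> roughly [0.909375, -1.14375, -0.74843750], approaching the exact
#   #    solution x = 56/59, y = -66/59, z = -40/59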
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
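# Typical use through the `pipeline` factory (a sketch; it assumes a CLIP-style
# checkpoint, and the scores shown are illustrative, not real outputs):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])
#   # -> [{"score": 0.98, "label": "cat"}, {"score": 0.02, "label": "dog"}]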
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64, ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim, )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
        result = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def _snake_case ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def _snake_case ( self : int ) ->Dict:
"""simple docstring"""
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def _snake_case ( self : Any ) ->List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _snake_case ( self : str ) ->str:
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : Optional[int] ) ->Any:
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
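# To run just the slow integration checks above from a transformers checkout (an
# assumption about the usual test layout, not part of the original file):
#
#   RUN_SLOW=1 pytest tests/models/mask2former/test_modeling_mask2former.py -k IntegrationTest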
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) ,requires_grad=snake_case__ )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
_SCREAMING_SNAKE_CASE = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
        attention_weights = ly_weight["attention"]
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder(weights, model):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) ,requires_grad=snake_case__ )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder(weights, model):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) ,requires_grad=snake_case__ )
_SCREAMING_SNAKE_CASE = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
_SCREAMING_SNAKE_CASE = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
        attention_weights = ly_weight["self_attention"]
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )
    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=f"{MODEL}/checkpoint_500000",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
    args = parser.parse_args()
main(args)
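# Illustrative CLI invocation for the converter above (the script filename and the
# checkpoint path are placeholders, not part of the original file):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path ./base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram-diffusion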
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
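The availability guards above keep this package importable even when an optional backend is missing: torch-only classes are imported solely inside the `is_torch_available()` branch. A minimal self-contained sketch of the same pattern; the flag and helper names below are illustrative stand-ins, not the actual library internals:

try:
    import torch  # noqa: F401

    _torch_found = True  # module-level flag, set once at import time
except ImportError:
    _torch_found = False


def torch_backend_present() -> bool:
    # Stand-in for the real is_torch_available() used above; backend-dependent
    # classes are only imported when this returns True.
    return _torch_found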
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_lxmert_fast'] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lxmert'] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_lxmert'] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
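The `_import_structure` dict is what makes the lazy import work: nothing under `modeling_lxmert` is actually imported until one of its attributes is first accessed. A rough sketch of the mechanism; this is a simplification, the real `_LazyModule` does more bookkeeping:

import importlib
from types import ModuleType


class LazyModuleSketch(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        # The owning submodule is imported only on first attribute access.
        module = importlib.import_module('.' + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)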
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if n reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    """Return the sum of all numbers below n that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
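A quick sanity check on one known double-base palindrome: 585 is a palindrome in base 10, and its binary form 1001001001 is a palindrome in base 2, so 585 contributes to the total.

assert is_palindrome(585)
assert bin(585) == "0b1001001001"
assert is_palindrome(bin(585).split("b")[1])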
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
class matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # A cell may be visited if it lies inside the grid, is land, and is unvisited.
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
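A short usage sketch with a hand-made 5x5 grid. Because all eight neighbours are explored, diagonally touching land cells belong to the same island:

graph = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
g = matrix(5, 5, graph)
print(g.count_islands())  # 5 -- (2, 0) merges diagonally into the top-left island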
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta'] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta'] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta'] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """
    Performs quantum key distribution (QKD) using the BB84 protocol and returns
    the generated key as a string of 0s and 1s.
    """
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name='BB84')

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend('aer_simulator')
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = ''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, '0')
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
from doctest import testmod
testmod()
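The key step above is basis sifting: only positions where Alice's preparation basis matches Bob's measurement basis are kept. That classical step can be checked without a simulator; a standalone sketch, separate from the protocol code above:

def sift(alice_basis, bob_basis, measured_bits):
    # Keep a measured bit only when both parties used the same basis.
    return ''.join(
        bit
        for a, b, bit in zip(alice_basis, bob_basis, measured_bits)
        if a == b
    )


# With bases (0,1,1,0) and (0,0,1,1), positions 0 and 2 agree, so two bits survive.
assert sift([0, 1, 1, 0], [0, 0, 1, 1], '1011') == '11'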
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    r"""
    Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into
    a single processor.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if 'feature_extractor' in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class
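A minimal usage sketch. The checkpoint name and the local image file below are assumptions for illustration, not part of this module:

from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained('OFA-Sys/chinese-clip-vit-base-patch16')
image = Image.open('photo.jpg')  # hypothetical local file
inputs = processor(text=['一张猫的照片'], images=image, return_tensors='pt')
# `inputs` now holds input_ids/attention_mask from the tokenizer, plus the
# pixel_values merged in by __call__ above.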
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=False , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.0_2 , _A=3 , _A=4 , _A=None , ):
__A : Union[str, Any] = parent
__A : List[str] = batch_size
__A : Optional[int] = seq_length
__A : List[Any] = is_training
__A : Optional[Any] = use_input_mask
__A : List[Any] = use_token_type_ids
__A : Optional[Any] = use_labels
__A : List[str] = vocab_size
__A : Optional[int] = hidden_size
__A : List[Any] = num_hidden_layers
__A : int = num_attention_heads
__A : Dict = intermediate_size
__A : Any = hidden_act
__A : Union[str, Any] = hidden_dropout_prob
__A : Union[str, Any] = attention_probs_dropout_prob
__A : Optional[int] = max_position_embeddings
__A : Dict = type_vocab_size
__A : Any = type_sequence_label_size
__A : Dict = initializer_range
__A : str = num_labels
__A : Union[str, Any] = num_choices
__A : str = scope
def UpperCAmelCase_ ( self ):
__A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : Optional[Any] = None
if self.use_input_mask:
__A : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__A : Dict = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A : Dict = None
__A : List[Any] = None
__A : List[Any] = None
if self.use_labels:
__A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__A : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : List[str] = LlamaModel(config=_A )
model.to(_A )
model.eval()
__A : Any = model(_A , attention_mask=_A )
__A : Any = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Dict = True
__A : int = LlamaModel(_A )
model.to(_A )
model.eval()
__A : str = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__A : int = model(
_A , attention_mask=_A , encoder_hidden_states=_A , )
__A : List[Any] = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Optional[Any] = LlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
__A : List[Any] = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : int = True
__A : List[Any] = True
__A : List[Any] = LlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
# first forward pass
__A : Optional[Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , use_cache=_A , )
__A : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__A : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
__A : str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__A : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__A : str = torch.cat([input_mask, next_mask] , dim=-1 )
__A : Tuple = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , output_hidden_states=_A , )['hidden_states'][0]
__A : Union[str, Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0]
# select random slice
__A : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__A : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.prepare_config_and_inputs()
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) : Tuple = config_and_inputs
__A : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
def UpperCAmelCase_ ( self ):
__A : List[Any] = LlamaModelTester(self )
__A : Optional[int] = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A : int = type
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A , __A : int = self.model_tester.prepare_config_and_inputs_for_common()
__A : str = 3
__A : Optional[int] = input_dict['input_ids']
__A : int = input_ids.ne(1 ).to(_A )
__A : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Optional[Any] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : List[Any] = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Union[str, Any] = 3
__A : Tuple = 'single_label_classification'
__A : Union[str, Any] = input_dict['input_ids']
__A : List[str] = input_ids.ne(1 ).to(_A )
__A : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Optional[int] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : Tuple = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = 3
__A : int = 'multi_label_classification'
__A : int = input_dict['input_ids']
__A : List[str] = input_ids.ne(1 ).to(_A )
__A : List[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__A : List[Any] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : Tuple = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def UpperCAmelCase_ ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCAmelCase_ ( self , _A ):
__A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Dict = ids_tensor([1, 10] , config.vocab_size )
__A : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : List[Any] = LlamaModel(_A )
original_model.to(_A )
original_model.eval()
__A : Dict = original_model(_A ).last_hidden_state
__A : int = original_model(_A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : int = {'type': scaling_type, 'factor': 1_0.0}
__A : str = LlamaModel(_A )
scaled_model.to(_A )
scaled_model.eval()
__A : Dict = scaled_model(_A ).last_hidden_state
__A : str = scaled_model(_A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_A , _A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
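The test above compares a plain model with a rope_scaling variant. For reference, linear RoPE scaling simply divides the position index by the factor before the rotary angles are built, so a factor of 10.0 squeezes sequences 10x longer into the trained position range. A minimal sketch of that step; this is illustrative, not the transformers implementation:

import torch


def rotary_angles(seq_len: int, dim: int, factor: float = 1.0, base: float = 10000.0) -> torch.Tensor:
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    t = torch.arange(seq_len, dtype=torch.float32) / factor  # the linear scaling step
    return torch.outer(t, inv_freq)  # (seq_len, dim/2) rotation angles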
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
@slow
def UpperCAmelCase_ ( self ):
__A : Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : Tuple = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
__A : Union[str, Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__A : Optional[int] = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : str = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
@slow
def UpperCAmelCase_ ( self ):
__A : int = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : List[str] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
__A : int = model(torch.tensor(_A ) )
# Expected mean on dim = -1
__A : List[str] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : List[str] = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
@slow
def UpperCAmelCase_ ( self ):
__A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : Tuple = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
__A : Optional[int] = model(torch.tensor(_A ) )
# Expected mean on dim = -1
__A : List[str] = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : Optional[Any] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test')
@slow
def UpperCAmelCase_ ( self ):
__A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
__A : List[Any] = model(torch.tensor(_A ) )
__A : Tuple = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# fmt: off
__A : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Model is currently gated')
@slow
def UpperCAmelCase_ ( self ):
__A : Tuple = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
__A : List[str] = 'Simply put, the theory of relativity states that '
__A : Union[str, Any] = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
__A : List[str] = tokenizer.encode(_A , return_tensors='pt' )
__A : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=_A )
# greedy generation outputs
__A : Union[str, Any] = model.generate(_A , max_new_tokens=64 , top_p=_A , temperature=1 , do_sample=_A )
__A : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=_A )
self.assertEqual(_A , _A )
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings that differ in at most one position; return False otherwise."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '_'
    if count > 1:
        return False
    else:
        return ''.join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge minterms until only prime implicants remain."""
    pi = []
    while True:
        check1 = ['$'] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = '*'
                    check1[j] = '*'
                    temp.append('X')
        for i in range(len(binary)):
            if check1[i] == '$':
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to its fixed-width binary representation."""
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the chart marking which prime implicants cover which minterms."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('_')
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input('Enter the no. of variables\n'))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print('Prime Implicants are:')
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print('Essential Prime Implicants are:')
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_sew'] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Return the sum of all numbers that equal the sum of the factorials of their digits."""
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(f'{solution() = }')
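The two non-trivial factorions in base 10 are 145 and 40585; a quick check of the first: 1! + 4! + 5! = 1 + 24 + 120 = 145.

assert sum_of_digit_factorial(145) == 145
assert sum_of_digit_factorial(40585) == 40585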
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args))
    return f'{func.__name__}_{param_based_name}'


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results, so all we
        # check for now is that the process didn't fail
        pass

    # XXX: need to do better validation beyond just that the run was successful
    def run_and_check(self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, fp16: bool = True, quality_checks: bool = True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True):
        output_dir = self.get_auto_remove_tmp_dir('./xxx', after=False)
        args = f'\n            --model_name_or_path {model_name}\n            --dataset_name hf-internal-testing/librispeech_asr_dummy\n            --dataset_config_name clean\n            --train_split_name validation\n            --validation_split_name validation\n            --output_dir {output_dir}\n            --num_train_epochs {str(num_train_epochs)}\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 2\n            --evaluation_strategy steps\n            --learning_rate 5e-4\n            --warmup_steps 8\n            --orthography timit\n            --preprocessing_num_workers 1\n            --group_by_length\n            --freeze_feature_extractor\n            --report_to none\n            --save_steps 0\n            --eval_steps {eval_steps}\n            --report_to none\n        '.split()
        if fp16:
            args.extend(['--fp16'])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
        script = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ (metaclass=DummyObject):
    '''simple docstring'''
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(self ,["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
class UpperCamelCase__ (metaclass=DummyObject):
    '''simple docstring'''
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(self ,["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
class UpperCamelCase__ (metaclass=DummyObject):
    '''simple docstring'''
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(self ,["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
class UpperCamelCase__ (metaclass=DummyObject):
    '''simple docstring'''
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(self ,["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
class UpperCamelCase__ (metaclass=DummyObject):
    '''simple docstring'''
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(self ,["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
class UpperCamelCase__ (metaclass=DummyObject):
    '''simple docstring'''
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(self ,["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ):
requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
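These placeholders rely on the `DummyObject` metaclass: any attribute access on the class triggers `requires_backends`, which raises with an installation hint. A self-contained sketch of that mechanism, simplified relative to the real utils (the names below are illustrative):

class DummyObjectSketch(type):
    def __getattr__(cls, key):
        if key.startswith('_'):
            raise AttributeError(key)
        raise ImportError(
            f'{cls.__name__} requires the following backends: {", ".join(cls._backends)}.'
        )


class NeedsOnnx(metaclass=DummyObjectSketch):
    _backends = ['torch', 'transformers', 'onnx']


# NeedsOnnx.from_pretrained  # would raise ImportError naming the missing backends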
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of a pre-softmax logit Tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
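As a quick sanity check: for uniform logits, log(sum exp x) - sum(x * exp x)/sum(exp x) reduces to log(n), the maximum entropy over n classes.

# entropy(torch.zeros(1, 4)) == tensor([1.3863])  # ln(4), maximal uncertainty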
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
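Note the control flow: an early exit is implemented by raising `HighwayException` from inside the layer loop, and the classification head later catches it to recover the partial outputs. A stripped-down sketch of the pattern, separate from the model code:

class EarlyExit(Exception):
    def __init__(self, value, layer):
        self.value = value
        self.layer = layer


def run_layers(confidences, threshold=0.5):
    for i, c in enumerate(confidences):
        if c < threshold:  # entropy low enough -> stop early
            raise EarlyExit(c, i + 1)
    return confidences[-1]


try:
    run_layers([0.9, 0.8, 0.3, 0.1])
except EarlyExit as e:
    print(f'exited at layer {e.layer} with value {e.value}')  # exited at layer 3 with value 0.3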
@add_start_docstrings(
    'The Bert Model transformer with early exiting (DeeBERT). ',
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here

        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message  # the outputs of the early-exit branch
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A module to provide a shortcut from (the output of one non-final BertLayer in BertEncoder)
    to (cross-entropy computation in BertForSequenceClassification).
    """

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    'Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ',
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Built-in voltage of a pn junction: V_bi = kT/q * ln(Nd * Na / ni^2)."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
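
# Worked example (assumed silicon-like values, illustrative only): with
# N_d = N_a = 1e17 cm^-3 and n_i = 1e10 cm^-3 at T = 300 K,
# V_bi = kT/q * ln(N_d * N_a / n_i**2) ≈ 0.83 V.
# print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10))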
| 379
|
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string (e.g. "MCMXC") to an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            # A smaller symbol before a larger one is subtractive (IV, XC, ...).
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
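
# Quick check (illustrative): "MCMXC" parses to 1990, because CM (900) and
# XC (90) are subtractive pairs.
# assert parse_roman_numerals("MCMXC") == 1990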
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal (canonical) Roman numeral string for an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Sum of character savings from rewriting each numeral in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
| 379
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    """Configuration class for RWKV models."""

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Both derived sizes default to sensible multiples of the hidden size.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
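
# Minimal usage sketch (assumes the transformers package layout above):
# config = RwkvConfig()
# assert config.intermediate_size == 4 * config.hidden_size  # derived default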
| 102
|
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b using only doubling and addition (Russian peasant method)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Same doubling scheme, but keep the accumulator reduced modulo `modulus`."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
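
# Quick self-check (illustrative): both helpers should agree with the built-in
# operators.
# assert binary_multiply(13, 7) == 13 * 7 == 91
# assert binary_mod_multiply(13, 7, 5) == (13 * 7) % 5 == 1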
| 25
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
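
# Worked example (illustrative values): L = 10 mH and C = 5 uF give
# f = 1 / (2 * pi * sqrt(0.01 * 5e-6)) ≈ 711.8 Hz.
# print(resonant_frequency(inductance=0.01, capacitance=5e-6))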
| 715
|
"""Project Euler 99: find the line whose base**exponent value is largest."""

import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Compare pairs via exponent * log10(base) instead of the huge powers."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        base, exponent = list(map(int, line.split(",")))
        if exponent * log10(base) > largest:
            largest = exponent * log10(base)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
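
# Why the log trick works (note, not in the original file): log10 is monotonic,
# so b1**e1 > b2**e2 exactly when e1 * log10(b1) > e2 * log10(b2); the comparison
# then involves small floats instead of numbers with millions of digits.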
| 357
| 0
|
"""simple docstring"""
from math import factorial
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if n < k or k < 0:
raise ValueError("Please enter positive integers for n and k where n >= k" )
return factorial(_lowercase ) // (factorial(_lowercase ) * factorial(n - k ))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
F"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
F"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
F"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
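
# Sanity check (illustrative): C(5, 2) = 5! / (2! * 3!) = 10.
# assert combinations(5, 2) == 10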
| 163
|
"""UMT5 model configuration."""

from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    """Configuration class for UMT5 models."""

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
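
# Minimal usage sketch (assumes the transformers package layout above):
# config = UMT5Config()
# assert config.hidden_size == config.d_model  # properties alias T5-style names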
| 523
| 0
|
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (fibonacci(2) == 1)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
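
# Performance note (not in the original file): digit counts grow by about
# log10((1 + 5**0.5) / 2) ≈ 0.209 digits per term, and rebuilding the whole
# sequence inside fibonacci() on every probe makes the search quadratic;
# keeping a running pair of terms would make it linear.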
| 701
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class MobileViTModelTester:
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ):
__snake_case : List[str] = parent
__snake_case : Tuple = batch_size
__snake_case : str = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : Optional[int] = num_channels
__snake_case : List[str] = last_hidden_size
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Dict = hidden_act
__snake_case : List[Any] = conv_kernel_size
__snake_case : int = output_stride
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Any = classifier_dropout_prob
__snake_case : str = use_labels
__snake_case : Optional[Any] = is_training
__snake_case : Dict = num_labels
__snake_case : str = initializer_range
__snake_case : Union[str, Any] = scope
def lowercase_ ( self ):
__snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = self.num_labels
__snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.num_labels
__snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
__snake_case : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Dict = MobileViTModelTester(self )
__snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(_UpperCAmelCase )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Optional[Any] = outputs.hidden_states
__snake_case : str = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case : Optional[Any] = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowercase_ ( self ):
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Optional[int] = prepare_img()
__snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : int = outputs.logits
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Any = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Optional[Any] = model(**_UpperCAmelCase )
__snake_case : str = outputs.logits.detach().cpu()
__snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
__snake_case : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__snake_case : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 679
| 0
|
"""simple docstring"""
from __future__ import annotations
graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Breadth-first search from the source vertex, recording each parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Walk recorded parents back to the source, formatted as A->B->C."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 656
|
"""simple docstring"""
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
if num <= 0:
raise ValueError("Input must be a positive integer" )
A__ : Union[str, Any] =[True] * (num + 1)
A__ : Union[str, Any] =2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , UpperCamelCase ):
A__ : str =False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Optional[int] = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
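
# Complexity note (not in the original file): the sieve runs in
# O(n log log n) time and O(n) memory. For example:
# assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]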
| 656
| 1
|
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys(state_dict):
    """Rename original SAM checkpoint keys to the Hugging Face layout."""
    model_state_dict = {}
    state_dict.pop('pixel_mean', None)
    state_dict.pop('pixel_std', None)
    output_hypernetworks_mlps_pattern = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace('layers.0', 'proj_in')
            elif layer_nb == 1:
                key = key.replace('layers.1', 'layers.0')
            elif layer_nb == 2:
                key = key.replace('layers.2', 'proj_out')

        model_state_dict[key] = value

    model_state_dict['shared_image_embedding.positional_embedding'] = model_state_dict[
        'prompt_encoder.shared_embedding.positional_embedding'
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    """Load an original SAM checkpoint, remap its keys to the HF layout, and
    sanity-check the predicted IoU scores for the known sam_vit_h weights."""
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location='cpu')
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to('cuda')

    img_url = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors='pt').to('cuda')
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors='pt'
        ).to('cuda')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors='pt').to('cuda')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors='pt'
        ).to('cuda')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
__lowerCAmelCase = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
__lowerCAmelCase = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 319
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 319
| 1
|
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__lowerCamelCase = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def a ( __UpperCAmelCase : Union[str, Any] ) -> List[str]:
__magic_name__: List[str] = {}
state_dict.pop("""pixel_mean""" , __UpperCAmelCase )
state_dict.pop("""pixel_std""" , __UpperCAmelCase )
__magic_name__: List[Any] = R""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__magic_name__: List[Any] = key.replace(__UpperCAmelCase , __UpperCAmelCase )
if re.match(__UpperCAmelCase , __UpperCAmelCase ):
__magic_name__: str = int(re.match(__UpperCAmelCase , __UpperCAmelCase ).group(2 ) )
if layer_nb == 0:
__magic_name__: Dict = key.replace("""layers.0""" , """proj_in""" )
elif layer_nb == 1:
__magic_name__: List[str] = key.replace("""layers.1""" , """layers.0""" )
elif layer_nb == 2:
__magic_name__: Dict = key.replace("""layers.2""" , """proj_out""" )
__magic_name__: Dict = value
__magic_name__: Optional[Any] = model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
def a ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int="ybelkada/segment-anything" ) -> Dict:
__magic_name__: List[Any] = hf_hub_download(__UpperCAmelCase , f'checkpoints/{model_name}.pth' )
if "sam_vit_b" in model_name:
__magic_name__: Tuple = SamConfig()
elif "sam_vit_l" in model_name:
__magic_name__: str = SamVisionConfig(
hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , global_attn_indexes=[5, 1_1, 1_7, 2_3] , )
__magic_name__: Union[str, Any] = SamConfig(
vision_config=__UpperCAmelCase , )
elif "sam_vit_h" in model_name:
__magic_name__: int = SamVisionConfig(
hidden_size=1_2_8_0 , num_hidden_layers=3_2 , num_attention_heads=1_6 , global_attn_indexes=[7, 1_5, 2_3, 3_1] , )
__magic_name__: int = SamConfig(
vision_config=__UpperCAmelCase , )
__magic_name__: List[str] = torch.load(__UpperCAmelCase , map_location="""cpu""" )
__magic_name__: Dict = replace_keys(__UpperCAmelCase )
__magic_name__: Optional[Any] = SamImageProcessor()
__magic_name__: Any = SamProcessor(image_processor=__UpperCAmelCase )
__magic_name__: str = SamModel(__UpperCAmelCase )
hf_model.load_state_dict(__UpperCAmelCase )
__magic_name__: Optional[int] = hf_model.to("""cuda""" )
__magic_name__: int = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
__magic_name__: int = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ).convert("""RGB""" )
__magic_name__: int = [[[4_0_0, 6_5_0]]]
__magic_name__: Optional[Any] = [[1]]
__magic_name__: Optional[int] = processor(images=np.array(__UpperCAmelCase ) , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__magic_name__: str = hf_model(**__UpperCAmelCase )
__magic_name__: Any = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
__magic_name__: List[Any] = processor(
images=np.array(__UpperCAmelCase ) , input_points=__UpperCAmelCase , input_labels=__UpperCAmelCase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__magic_name__: Tuple = hf_model(**__UpperCAmelCase )
__magic_name__: str = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
__magic_name__: Any = ((7_5, 2_7_5, 1_7_2_5, 8_5_0),)
__magic_name__: List[str] = processor(images=np.array(__UpperCAmelCase ) , input_boxes=__UpperCAmelCase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__magic_name__: Optional[int] = hf_model(**__UpperCAmelCase )
__magic_name__: Dict = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
__magic_name__: Optional[Any] = [[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]]
__magic_name__: List[str] = [[1, 1]]
__magic_name__: Optional[Any] = processor(
images=np.array(__UpperCAmelCase ) , input_points=__UpperCAmelCase , input_labels=__UpperCAmelCase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__magic_name__: int = hf_model(**__UpperCAmelCase )
__magic_name__: List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
__lowerCamelCase = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
__lowerCamelCase = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 96
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase__ ( self : str ) -> Optional[int]:
torch.manual_seed(0 )
__magic_name__: str = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
__magic_name__: Union[str, Any] = PNDMScheduler(skip_prk_steps=__snake_case )
torch.manual_seed(0 )
__magic_name__: Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__magic_name__: int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__magic_name__: Optional[int] = CLIPTextModel(__snake_case )
__magic_name__: Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__magic_name__: Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : int , __snake_case : List[Any]=0 ) -> Optional[Any]:
__magic_name__: Optional[int] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__: str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__: Any = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" )
if str(__snake_case ).startswith("""mps""" ):
__magic_name__: Optional[Any] = torch.manual_seed(__snake_case )
else:
__magic_name__: str = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__: Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__: int = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__: List[Any] = self.get_dummy_components()
__magic_name__: int = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: Union[str, Any] = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: List[Any] = self.get_dummy_inputs(__snake_case )
__magic_name__: Tuple = sd_pipe(**__snake_case ).images
__magic_name__: Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__magic_name__: List[Any] = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
__magic_name__: Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__: str = self.get_dummy_components()
__magic_name__: Dict = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: str = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: Optional[Any] = self.get_dummy_inputs(__snake_case )
__magic_name__: List[Any] = """french fries"""
__magic_name__: int = sd_pipe(**__snake_case , negative_prompt=__snake_case )
__magic_name__: Dict = output.images
__magic_name__: Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__magic_name__: str = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
__magic_name__: Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__: int = self.get_dummy_components()
__magic_name__: Dict = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: Dict = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: str = self.get_dummy_inputs(__snake_case )
__magic_name__: List[str] = [inputs["""prompt"""]] * 2
__magic_name__: List[str] = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
__magic_name__: Optional[int] = torch.from_numpy(__snake_case ).unsqueeze(0 ).to(__snake_case )
__magic_name__: Tuple = image / 2 + 0.5
__magic_name__: Dict = image.permute(0 , 3 , 1 , 2 )
__magic_name__: List[str] = image.repeat(2 , 1 , 1 , 1 )
__magic_name__: str = sd_pipe(**__snake_case ).images
__magic_name__: List[str] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
__magic_name__: Optional[int] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
__magic_name__: Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__: Union[str, Any] = self.get_dummy_components()
__magic_name__: Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
__magic_name__: Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: Optional[int] = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: List[str] = self.get_dummy_inputs(__snake_case )
__magic_name__: Tuple = sd_pipe(**__snake_case ).images
__magic_name__: Any = image[0, -3:, -3:, -1]
__magic_name__: str = [round(__snake_case , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(__snake_case ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
__magic_name__: Optional[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
__magic_name__: Tuple = self.get_dummy_components()
__magic_name__: Tuple = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: str = VaeImageProcessor(do_resize=__snake_case , do_normalize=__snake_case )
__magic_name__: Union[str, Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: Union[str, Any] = pipe(**self.get_dummy_inputs_by_type(__snake_case , input_image_type="""pt""" ) )[0]
__magic_name__: Union[str, Any] = components["""vae"""]
__magic_name__: str = self.get_dummy_inputs_by_type(__snake_case , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__magic_name__: int = vae.encode(inputs[image_param] ).latent_dist.mode()
__magic_name__: Dict = pipe(**__snake_case )[0]
__magic_name__: Optional[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(__snake_case , 1E-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : str , __snake_case : List[str]=0 ) -> Dict:
__magic_name__: Union[str, Any] = torch.manual_seed(__snake_case )
__magic_name__: Optional[int] = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
__magic_name__: Optional[Any] = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: Optional[Any] = self.get_inputs()
__magic_name__: str = pipe(**__snake_case ).images
__magic_name__: Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__magic_name__: Any = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
__magic_name__: Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case )
__magic_name__: List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: List[str] = self.get_inputs()
__magic_name__: Dict = pipe(**__snake_case ).images
__magic_name__: str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__magic_name__: Optional[Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Any ) -> List[str]:
__magic_name__: str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case )
__magic_name__: int = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: Union[str, Any] = self.get_inputs()
__magic_name__: Any = pipe(**__snake_case ).images
__magic_name__: int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__magic_name__: Optional[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase__ ( self : int ) -> Dict:
__magic_name__: Tuple = 0
def callback_fn(__snake_case : int , __snake_case : int , __snake_case : torch.FloatTensor ) -> None:
__magic_name__: Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__magic_name__: List[str] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__magic_name__: int = latents[0, -3:, -3:, -1]
__magic_name__: Union[str, Any] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__magic_name__: Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__magic_name__: str = latents[0, -3:, -3:, -1]
__magic_name__: Optional[Any] = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__magic_name__: Tuple = False
__magic_name__: List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case , torch_dtype=torch.floataa )
__magic_name__: Union[str, Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: Dict = self.get_inputs()
pipe(**__snake_case , callback=__snake_case , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCamelCase__ ( self : Tuple ) -> int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__magic_name__: List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case , torch_dtype=torch.floataa )
__magic_name__: int = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__magic_name__: Optional[int] = self.get_inputs()
__magic_name__: Any = pipe(**__snake_case )
__magic_name__: List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def lowerCamelCase__ ( self : str ) -> Optional[int]:
__magic_name__: Optional[int] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__magic_name__: Any = inputs["""image"""].resize((5_0_4, 5_0_4) )
__magic_name__: List[str] = """timbrooks/instruct-pix2pix"""
__magic_name__: Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__snake_case , safety_checker=__snake_case , )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: str = pipe(**__snake_case )
__magic_name__: Optional[int] = output.images[0]
__magic_name__: Union[str, Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
__magic_name__: Optional[Any] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 96
| 1
|
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def A_ ( self ) -> Dict:
'''simple docstring'''
super().setUp()
_UpperCamelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def A_ ( self , a ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = """こんにちは、世界。 \nこんばんは、世界。"""
_UpperCamelCase = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def A_ ( self , a ) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.get_input_output_texts(a )
_UpperCamelCase = tokenizer.encode(a , add_special_tokens=a )
_UpperCamelCase = tokenizer.decode(a , clean_up_tokenization_spaces=a )
return text, ids
def A_ ( self ) -> List[str]:
'''simple docstring'''
pass # TODO add if relevant
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass # TODO add if relevant
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass # TODO add if relevant
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer_class(self.vocab_file )
_UpperCamelCase = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def A_ ( self ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(a )
_UpperCamelCase = """こんにちは、世界。\nこんばんは、世界。"""
_UpperCamelCase = tokenizer.tokenize(a )
self.assertListEqual(a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_UpperCamelCase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(a , """wb""" ) as handle:
pickle.dump(a , a )
with open(a , """rb""" ) as handle:
_UpperCamelCase = pickle.load(a )
_UpperCamelCase = tokenizer_new.tokenize(a )
self.assertListEqual(a , a )
def A_ ( self ) -> Any:
'''simple docstring'''
_UpperCamelCase = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def A_ ( self ) -> Any:
'''simple docstring'''
try:
_UpperCamelCase = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def A_ ( self ) -> int:
'''simple docstring'''
try:
_UpperCamelCase = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def A_ ( self ) -> int:
'''simple docstring'''
_UpperCamelCase = MecabTokenizer(do_lower_case=a , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def A_ ( self ) -> Any:
'''simple docstring'''
try:
_UpperCamelCase = MecabTokenizer(
do_lower_case=a , normalize_text=a , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
            # if the dictionary is not installed on the system, the constructor above raises this error
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = MecabTokenizer(normalize_text=a , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def A_ ( self ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(a )
_UpperCamelCase = """こんにちは、世界。\nこんばんは、世界。"""
_UpperCamelCase = tokenizer.tokenize(a )
self.assertListEqual(a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_UpperCamelCase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(a , """wb""" ) as handle:
pickle.dump(a , a )
with open(a , """rb""" ) as handle:
_UpperCamelCase = pickle.load(a )
_UpperCamelCase = tokenizer_new.tokenize(a )
self.assertListEqual(a , a )
@require_sudachi
def A_ ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def A_ ( self ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def A_ ( self ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
def A_ ( self ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = SudachiTokenizer(do_lower_case=a , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def A_ ( self ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = SudachiTokenizer(normalize_text=a , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def A_ ( self ) -> Any:
'''simple docstring'''
_UpperCamelCase = SudachiTokenizer(trim_whitespace=a , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(a )
_UpperCamelCase = """こんにちは、世界。\nこんばんは、世界。"""
_UpperCamelCase = tokenizer.tokenize(a )
self.assertListEqual(a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_UpperCamelCase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(a , """wb""" ) as handle:
pickle.dump(a , a )
with open(a , """rb""" ) as handle:
_UpperCamelCase = pickle.load(a )
_UpperCamelCase = tokenizer_new.tokenize(a )
self.assertListEqual(a , a )
@require_jumanpp
def A_ ( self ) -> int:
'''simple docstring'''
_UpperCamelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = JumanppTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def A_ ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = JumanppTokenizer(normalize_text=a )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = JumanppTokenizer(trim_whitespace=a )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def A_ ( self ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def A_ ( self ) -> int:
'''simple docstring'''
_UpperCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
_UpperCamelCase = {}
for i, token in enumerate(a ):
_UpperCamelCase = i
_UpperCamelCase = WordpieceTokenizer(vocab=a , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def A_ ( self ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
_UpperCamelCase = tokenizer.subword_tokenizer
_UpperCamelCase = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(a , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
_UpperCamelCase = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(a , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
_UpperCamelCase = tokenizer.encode("""ありがとう。""" , add_special_tokens=a )
_UpperCamelCase = tokenizer.encode("""どういたしまして。""" , add_special_tokens=a )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(a )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(a , a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
UpperCamelCase_ : Dict = BertJapaneseTokenizer
UpperCamelCase_ : Optional[int] = False
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
_UpperCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def A_ ( self , **a ) -> Optional[int]:
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **a )
def A_ ( self , a ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = """こんにちは、世界。 \nこんばんは、世界。"""
_UpperCamelCase = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def A_ ( self ) -> List[Any]:
'''simple docstring'''
pass # TODO add if relevant
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass # TODO add if relevant
def A_ ( self ) -> List[Any]:
'''simple docstring'''
pass # TODO add if relevant
def A_ ( self ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
_UpperCamelCase = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
a , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def A_ ( self ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_UpperCamelCase = {}
for i, token in enumerate(a ):
_UpperCamelCase = i
_UpperCamelCase = CharacterTokenizer(vocab=a , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
_UpperCamelCase = tokenizer.encode("""ありがとう。""" , add_special_tokens=a )
_UpperCamelCase = tokenizer.encode("""どういたしまして。""" , add_special_tokens=a )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(a )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(a , a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
def A_ ( self ) -> Dict:
'''simple docstring'''
_UpperCamelCase = """cl-tohoku/bert-base-japanese"""
_UpperCamelCase = AutoTokenizer.from_pretrained(a )
self.assertIsInstance(a , a )
class lowerCAmelCase__ ( unittest.TestCase ):
def A_ ( self ) -> Dict:
'''simple docstring'''
_UpperCamelCase = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(a )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
_UpperCamelCase = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(a )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Any:
"""simple docstring"""
_UpperCamelCase = os.path.abspath(lowerCAmelCase )
logger.info(F'Converting TensorFlow checkpoint from {tf_path}' )
# Load weights from TF model
_UpperCamelCase = tf.train.list_variables(lowerCAmelCase )
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
_UpperCamelCase = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'Skipping non-model layer {full_name}' )
continue
if "optimizer" in full_name:
logger.info(F'Skipping optimization layer {full_name}' )
continue
if name[0] == "model":
# ignore initial 'model'
_UpperCamelCase = name[1:]
# figure out how many levels deep the name is
_UpperCamelCase = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(lowerCAmelCase )
# read data
_UpperCamelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
names.append("""/""".join(lowerCAmelCase ) )
arrays.append(lowerCAmelCase )
logger.info(F'Read a total of {len(lowerCAmelCase ):,} layers' )
# Sanity check
if len(set(lowerCAmelCase ) ) != 1:
raise ValueError(F'Found layer names with different depths (layer depth {list(set(lowerCAmelCase ) )})' )
_UpperCamelCase = list(set(lowerCAmelCase ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(lowerCAmelCase , lowerCAmelCase ):
_UpperCamelCase = full_name.split("""/""" )
_UpperCamelCase = model
_UpperCamelCase = []
for i, m_name in enumerate(lowerCAmelCase ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
_UpperCamelCase = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """embeddings""" )
_UpperCamelCase = getattr(lowerCAmelCase , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
_UpperCamelCase = getattr(lowerCAmelCase , """encoder""" )
_UpperCamelCase = getattr(lowerCAmelCase , """layer""" )
_UpperCamelCase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """pooler""" )
_UpperCamelCase = getattr(lowerCAmelCase , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
_UpperCamelCase = getattr(lowerCAmelCase , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
_UpperCamelCase = getattr(lowerCAmelCase , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
_UpperCamelCase = getattr(lowerCAmelCase , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
_UpperCamelCase = getattr(lowerCAmelCase , """token_type_embeddings""" )
else:
raise ValueError(F'Unknown embedding layer with name {full_name}' )
trace.append("""weight""" )
_UpperCamelCase = getattr(lowerCAmelCase , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """attention""" )
_UpperCamelCase = getattr(lowerCAmelCase , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """attention""" )
_UpperCamelCase = getattr(lowerCAmelCase , """output""" )
_UpperCamelCase = getattr(lowerCAmelCase , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """attention""" )
_UpperCamelCase = getattr(lowerCAmelCase , """output""" )
_UpperCamelCase = getattr(lowerCAmelCase , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """output""" )
_UpperCamelCase = getattr(lowerCAmelCase , """dense""" )
elif m_name == "_output_layer_norm":
                # output layer norm
trace.extend(["""output""", """LayerNorm"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """output""" )
_UpperCamelCase = getattr(lowerCAmelCase , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
_UpperCamelCase = getattr(lowerCAmelCase , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
_UpperCamelCase = getattr(lowerCAmelCase , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
_UpperCamelCase = getattr(lowerCAmelCase , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """intermediate""" )
_UpperCamelCase = getattr(lowerCAmelCase , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
_UpperCamelCase = getattr(lowerCAmelCase , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
_UpperCamelCase = getattr(lowerCAmelCase , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
_UpperCamelCase = getattr(lowerCAmelCase , """weight""" )
else:
logger.warning(F'Ignored {m_name}' )
# for certain layers reshape is necessary
_UpperCamelCase = """.""".join(lowerCAmelCase )
if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , lowerCAmelCase ) or re.match(
R"""(\S+)\.attention\.output\.dense\.weight""" , lowerCAmelCase ):
_UpperCamelCase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
_UpperCamelCase = array.transpose()
if pointer.shape == array.shape:
_UpperCamelCase = torch.from_numpy(lowerCAmelCase )
else:
raise ValueError(
F'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
F' {array.shape}' )
logger.info(F'Successfully set variable {full_name} to PyTorch layer {trace}' )
return model
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Tuple:
"""simple docstring"""
logger.info(F'Loading model based on config from {config_path}...' )
_UpperCamelCase = BertConfig.from_json_file(lowerCAmelCase )
_UpperCamelCase = BertModel(lowerCAmelCase )
# Load weights from checkpoint
logger.info(F'Loading weights from checkpoint {tf_checkpoint_path}...' )
load_tfa_weights_in_bert(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
logger.info(F'Saving PyTorch model to {pytorch_dump_path}...' )
torch.save(model.state_dict() , lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
lowerCamelCase__ = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
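# Invocation sketch (the script file name and all paths are placeholder assumptions):
#
#     python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./tf2_ckpt/bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin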
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase : str = logging.getLogger()
def _dump_articles(path: Path, articles: list) -> None:
    content = '''\n'''.join(articles)
    Path(path).open('''w''' ).writelines(content)
_lowerCAmelCase : Union[str, Any] = "patrickvonplaten/t5-tiny-random"
_lowerCAmelCase : Any = "sshleifer/bart-tiny-random"
_lowerCAmelCase : Union[str, Any] = "sshleifer/tiny-mbart"
_lowerCAmelCase : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def lowercase__ ( self , lowerCamelCase ) -> Dict:
"""simple docstring"""
snake_case__ : Dict = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
snake_case__ : Dict = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
snake_case__ : Tuple = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
_dump_articles(lowerCamelCase , lowerCamelCase )
snake_case__ : int = str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' )
snake_case__ : Dict = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
snake_case__ : str = f'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(lowerCamelCase , '''argv''' , lowerCamelCase ):
run_generate()
assert Path(lowerCamelCase ).exists()
# os.remove(Path(output_file_name))
def lowercase__ ( self ) -> Any:
"""simple docstring"""
self.run_eval_tester(lowerCamelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowercase__ ( self , lowerCamelCase ) -> int:
"""simple docstring"""
self.run_eval_tester(lowerCamelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : Optional[Any] = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
snake_case__ : Any = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
snake_case__ : List[Any] = {
'''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
'''de''': [
'''Maschinelles Lernen ist großartig, oder?''',
'''Ich esse gerne Bananen''',
'''Morgen ist wieder ein toller Tag!''',
],
}
snake_case__ : Tuple = Path(self.get_auto_remove_tmp_dir() )
snake_case__ : Any = str(tmp_dir / '''scores.json''' )
snake_case__ : Any = str(tmp_dir / '''val.target''' )
_dump_articles(lowerCamelCase , text['''en'''] )
_dump_articles(lowerCamelCase , text['''de'''] )
snake_case__ : List[Any] = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
snake_case__ : List[Any] = f'''
run_eval_search.py
{model}
{str(lowerCamelCase )}
{str(lowerCamelCase )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] )
with patch.object(lowerCamelCase , '''argv''' , lowerCamelCase ):
with CaptureStdout() as cs:
run_search()
snake_case__ : Tuple = [''' num_beams | length_penalty''', model, '''Best score args''']
snake_case__ : Union[str, Any] = ['''Info''']
if "translation" in task:
expected_strings.append('''bleu''' )
else:
expected_strings.extend(lowerCamelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(lowerCamelCase ).exists()
os.remove(Path(lowerCamelCase ) )
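# The grid search exercised above corresponds roughly to this CLI shape
# (file names are placeholder assumptions):
#
#     python run_eval_search.py sshleifer/bart-tiny-random input.source output.txt \
#         --score_path scores.json --reference_path val.target \
#         --task summarization --search "num_beams=1:2 length_penalty=0.9:1.0"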
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def _A ( snake_case__ : Accelerator , snake_case__ : int = 16 ):
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
snake_case__ : str = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case__ : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ : List[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case__ : Tuple = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case__ : Tuple = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case__ : Tuple = 16
elif accelerator.mixed_precision != "no":
snake_case__ : int = 8
else:
snake_case__ : List[Any] = None
return tokenizer.pad(
snake_case__ , padding='''longest''' , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors='''pt''' , )
# Instantiate dataloaders.
snake_case__ : int = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
snake_case__ : Optional[int] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCAmelCase : Tuple = mocked_dataloaders # noqa: F811
def _A ( snake_case__ : Optional[Any] , snake_case__ : int ):
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , snake_case__ ) == "1":
snake_case__ : Any = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
snake_case__ : List[str] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
snake_case__ : List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ : List[str] = config['''lr''']
snake_case__ : List[Any] = int(config['''num_epochs'''] )
snake_case__ : List[Any] = int(config['''seed'''] )
snake_case__ : Union[str, Any] = int(config['''batch_size'''] )
set_seed(snake_case__ )
snake_case__ ,snake_case__ : Tuple = get_dataloaders(snake_case__ , snake_case__ )
snake_case__ : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
snake_case__ : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case__ : Dict = batch_size // MAX_GPU_BATCH_SIZE
snake_case__ : int = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ : Tuple = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case__ : Dict = model.to(accelerator.device )
# Instantiate optimizer
snake_case__ : List[Any] = AdamW(params=model.parameters() , lr=snake_case__ )
# Instantiate scheduler
snake_case__ : Dict = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : List[str] = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
snake_case__ : Union[str, Any] = os.path.split(snake_case__ )[-1].split('''.''' )[0]
accelerator.init_trackers(snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
snake_case__ : Tuple = 0
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case__ : Optional[Any] = model(**snake_case__ )
snake_case__ : List[str] = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
snake_case__ : str = loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ : int = model(**snake_case__ )
snake_case__ : str = outputs.logits.argmax(dim=-1 )
snake_case__ ,snake_case__ : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
snake_case__ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , snake_case__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(snake_case__ ),
'''epoch''': epoch,
} , step=snake_case__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def _A ( ):
snake_case__ : Optional[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=snake_case__ , default=snake_case__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=snake_case__ , default='''logs''' , help='''Location where experiment tracking logs and relevant project information are stored.''' , )
snake_case__ : List[Any] = parser.parse_args()
snake_case__ : Any = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
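# Launch sketch (assumes `accelerate config` has already been run; the script file
# name is a placeholder):
#
#     accelerate launch tracking_example.py --with_tracking --project_dir ./logs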
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Compute u * (u - 1) * ... * (u - p + 1), the coefficient term of the
    Newton forward-difference formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input('enter the numbers of values: '))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print('enter the values of parameters in a list: ')
    x = list(map(float, input().split()))
    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('enter the value to interpolate: '))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(F'''the value at {value} is {summ}''')
if __name__ == "__main__":
main()
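# Worked sketch with hypothetical sample data: for y-values [1, 2, 4, 8] tabulated
# at x = [0, 1, 2, 3], the leading forward differences are all 1, and interpolating
# midway between x = 1 and x = 2 gives u = 1.5, so
#     summ = 1 + ucal(1.5, 1) * 1 / 1! + ucal(1.5, 2) * 1 / 2! + ucal(1.5, 3) * 1 / 3!
#          = 1 + 1.5 + 0.375 - 0.0625 = 2.8125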
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
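# The deprecation message above spells out the supported import path going forward:
#
#     from diffusers import FlaxStableDiffusionControlNetPipeline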
def solution(n: int = 400_0000) -> int:
    """Return the sum of the even-valued Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"""{solution() = }""")
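    # sanity check: the sum of the even Fibonacci numbers not exceeding 4,000,000
    # is the well-known value 4,613,732
    assert solution() == 4_613_732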
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase_ = 16
lowercase_ = 32
def UpperCAmelCase ( _lowercase : Accelerator , _lowercase : int = 1_6 , _lowercase : str = "bert-base-cased" ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_lowercase )
lowerCAmelCase_ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(_lowercase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_lowercase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return tokenizer.pad(_lowercase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowerCAmelCase_ = DataLoader(
tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
lowerCAmelCase_ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def UpperCAmelCase ( _lowercase : int , _lowercase : Any , _lowercase : Any , _lowercase : str ) -> List[Any]:
"""simple docstring"""
model.eval()
lowerCAmelCase_ = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ = model(**_lowercase )
lowerCAmelCase_ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowerCAmelCase_ , lowerCAmelCase_ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowercase ) - 1:
lowerCAmelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
lowerCAmelCase_ = metric.compute()
return eval_metric["accuracy"]
def UpperCAmelCase ( _lowercase : Dict , _lowercase : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ = config['''lr''']
lowerCAmelCase_ = int(config['''num_epochs'''] )
lowerCAmelCase_ = int(config['''seed'''] )
lowerCAmelCase_ = int(config['''batch_size'''] )
lowerCAmelCase_ = args.model_name_or_path
set_seed(_lowercase )
lowerCAmelCase_ , lowerCAmelCase_ = get_dataloaders(_lowercase , _lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
lowerCAmelCase_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
lowerCAmelCase_ = 1
lowerCAmelCase_ = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
lowerCAmelCase_ = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ = 0
lowerCAmelCase_ = evaluate.load('''glue''' , '''mrpc''' )
lowerCAmelCase_ = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase_ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase_ = args.resume_from_checkpoint.split('''epoch_''' )[1]
lowerCAmelCase_ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase_ = int(_lowercase ) + 1
lowerCAmelCase_ = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
accelerator.print('''resumed checkpoint performance:''' , _lowercase )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizer\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , F"""state_{starting_epoch-1}.json""" ) , '''r''' ) as f:
lowerCAmelCase_ = json.load(_lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase_ = {}
for epoch in range(_lowercase , _lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
lowerCAmelCase_ = model(**_lowercase )
lowerCAmelCase_ = outputs.loss
lowerCAmelCase_ = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase_ = F"""epoch_{epoch}"""
lowerCAmelCase_ = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
lowerCAmelCase_ = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
lowerCAmelCase_ = accuracy
lowerCAmelCase_ = lr_scheduler.get_lr()[0]
lowerCAmelCase_ = optimizer.param_groups[0]['''lr''']
lowerCAmelCase_ = epoch
lowerCAmelCase_ = overall_step
accelerator.print(F"""epoch {epoch}:""" , _lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F"""state_{epoch}.json""" ) , '''w''' ) as f:
json.dump(_lowercase , _lowercase )
def UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
    lowerCAmelCase_ = argparse.ArgumentParser(description='''Simple example of a training script with checkpointing and resuming.''' )
parser.add_argument(
'''--model_name_or_path''' , type=_lowercase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowercase , )
parser.add_argument(
'''--output_dir''' , type=_lowercase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=_lowercase , default=_lowercase , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=_lowercase , default=_lowercase , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=_lowercase , default=2 , help='''Number of train epochs.''' , )
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
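# Launch and resume sketch (assumes `accelerate config` has been run; the script
# file name and output directory are placeholder assumptions):
#
#     accelerate launch checkpointing_example.py --output_dir ./ckpts --num_epochs 2
#     accelerate launch checkpointing_example.py --output_dir ./ckpts \
#         --resume_from_checkpoint ./ckpts/epoch_0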
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict) -> None:
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    'split_info' , [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='my_dataset' )] )
def test_split_dict_asdict_keeps_dataset_name(split_info) -> None:
    split_dict_asdict = asdict(SplitDict({'train': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
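# Round-trip sketch mirroring the first test above (the values are illustrative); as
# the test shows, `dataset_name` is dropped by the YAML dump and the split name is
# taken from the dict key on reload:
#
#     sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
#     reloaded = SplitDict._from_yaml_list(sd._to_yaml_list())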
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class _snake_case :
def __init__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if not conversation_id:
            lowerCAmelCase = uuid.uuid4()
if past_user_inputs is None:
lowerCAmelCase = []
if generated_responses is None:
lowerCAmelCase = []
lowerCAmelCase = conversation_id
lowerCAmelCase = past_user_inputs
lowerCAmelCase = generated_responses
lowerCAmelCase = text
def __eq__( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
lowerCAmelCase = text
else:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
lowerCAmelCase = text
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowerCAmelCase = None
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
self.generated_responses.append(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
'''simple docstring'''
lowerCAmelCase = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
lowerCAmelCase = 'user' if is_user else 'bot'
output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
a_ , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class _snake_case ( a_ ):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.tokenizer.pad_token_id is None:
lowerCAmelCase = self.tokenizer.eos_token
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = {}
lowerCAmelCase = {}
lowerCAmelCase = {}
if min_length_for_response is not None:
lowerCAmelCase = min_length_for_response
if minimum_tokens is not None:
lowerCAmelCase = minimum_tokens
if "max_length" in generate_kwargs:
lowerCAmelCase = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowerCAmelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_SCREAMING_SNAKE_CASE )
return preprocess_params, forward_params, postprocess_params
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = super().__call__(_SCREAMING_SNAKE_CASE , num_workers=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=32 ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            raise ValueError('ConversationalPipeline expects a Conversation as input' )
if conversation.new_user_input is None:
raise ValueError(
                F'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
lowerCAmelCase = self.tokenizer._build_conversation_input_ids(_SCREAMING_SNAKE_CASE )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowerCAmelCase = self._legacy_parse_and_tokenize(_SCREAMING_SNAKE_CASE )
if self.framework == "pt":
lowerCAmelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowerCAmelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=10 , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
lowerCAmelCase = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
lowerCAmelCase = max_length - minimum_tokens
lowerCAmelCase = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
lowerCAmelCase = model_inputs['attention_mask'][:, -trim:]
lowerCAmelCase = model_inputs.pop('conversation' )
lowerCAmelCase = max_length
lowerCAmelCase = self.model.generate(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.model.config.is_encoder_decoder:
lowerCAmelCase = 1
else:
lowerCAmelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
lowerCAmelCase = model_outputs['output_ids']
lowerCAmelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(_SCREAMING_SNAKE_CASE )
return conversation
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = self.tokenizer.eos_token_id
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) > self.tokenizer.model_max_length:
lowerCAmelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
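# A minimal usage sketch for the pipeline above; the model id is an assumption (any
# conversational checkpoint works), not something taken from this file:
#
#     from transformers import Conversation, pipeline
#
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#     conversation = Conversation("What's a good sci-fi book?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])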
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
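# Added note (assumption about intent): with the lazy-module pattern above, the
# torch-backed submodules registered in the import structure are only executed
# on first attribute access, so importing the package itself stays cheap.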
| 47
|
'''simple docstring'''
def lowerCamelCase ( input_string : str ):
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character + "|" in new_input_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l , r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, then update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
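    # Added example (not from the source): the longest palindromic substring
    # of "abacab" is "bacab", so this should print "bacab".
    print(lowerCamelCase("abacab"))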
| 561
| 0
|
'''simple docstring'''
import sys
import turtle
def get_mid ( pa , pb ):
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle ( vertexa , vertexb , vertexc , depth , ):
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1] )
    my_pen.goto(vertexc[0] , vertexc[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa , get_mid(vertexa , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
    triangle(vertexb , get_mid(vertexa , vertexb ) , get_mid(vertexb , vertexc ) , depth - 1 )
    triangle(vertexc , get_mid(vertexc , vertexa ) , get_mid(vertexc , vertexb ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
lowercase : Any = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
lowercase : Union[str, Any] = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
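    # Added note (derived, not from the source): each recursion level triples
    # the triangle count, so `python fractals.py 3` bottoms out at 3**3 = 27
    # smallest triangles.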
| 701
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowercase : Dict = logging.getLogger(__name__)
class __lowercase :
"""simple docstring"""
    def __init__( self ) -> Optional[int]:
        self.initialized = False
    def create_rag_retriever( self , config , question_encoder_tokenizer , generator_tokenizer , index ) -> Optional[Any]:
        if not self.initialized:
            self.retriever = RagRetriever(
                config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
            self.initialized = True
    def init_retrieval( self ) -> int:
        self.retriever.index.init_index()
    def retrieve( self , question_hidden_states , n_docs ) -> str:
        doc_ids , retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states , n_docs )
        return doc_ids, retrieved_doc_embeds
class __lowercase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , retrieval_workers , index=None ) -> Optional[Any]:
        if index is not None and index.is_initialized() and len(retrieval_workers ) > 0:
            raise ValueError(
                '''When using Ray for distributed fine-tuning, '''
                '''you\'ll need to provide the paths instead, '''
                '''as the dataset and the index are loaded '''
                '''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config , question_encoder_tokenizer , generator_tokenizer , index )
                    for worker in self.retrieval_workers
                ] )
    def init_retrieval( self ) -> Union[str, Any]:
        logger.info('''initializing retrieval''' )
        if len(self.retrieval_workers ) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve( self , question_hidden_states , n_docs ) -> List[str]:
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            doc_ids , retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states , n_docs ) )
        else:
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
    @classmethod
    def get_tokenizers( cls , retriever_name_or_path , indexed_dataset=None , **kwargs ) -> Any:
        return super(__lowercase , cls ).get_tokenizers(retriever_name_or_path , indexed_dataset , **kwargs )
    @classmethod
    def from_pretrained( cls , retriever_name_or_path , actor_handles , indexed_dataset=None , **kwargs ) -> Dict:
        config = kwargs.pop('''config''' , None ) or RagConfig.from_pretrained(retriever_name_or_path , **kwargs )
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path , config=config )
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = '''custom'''
            index = CustomHFIndex(config.retrieval_vector_size , indexed_dataset )
        else:
            index = cls._build_index(config )
        return cls(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , retrieval_workers=actor_handles , index=index , )
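# --- Added usage sketch (not from the source; upstream these mangled classes
# are RayRetriever and RagRayDistributedRetriever, and the worker count and
# checkpoint name are assumptions):
#
# import ray
# ray.init()
# workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
# retriever = RagRayDistributedRetriever.from_pretrained(
#     "facebook/rag-token-nq", actor_handles=workers)
# retriever.init_retrieval()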
| 423
| 0
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->Union[str, Any]:
_UpperCAmelCase =[]
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->int:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
_UpperCAmelCase =state_dict.pop(F"encoder.deit.blocks.{i}.attn.qkv.weight" )
_UpperCAmelCase =in_proj_weight[
: encoder_config.hidden_size, :
]
_UpperCAmelCase =in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
_UpperCAmelCase =in_proj_weight[
-encoder_config.hidden_size :, :
]
def rename_key ( dct , old , new ) ->Any:
    val = dct.pop(old )
    dct[new] = val
def lowerCamelCase__ ( _lowerCamelCase ) ->str:
if "handwritten" in checkpoint_url:
_UpperCAmelCase ="https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
_UpperCAmelCase ="https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
_UpperCAmelCase =Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("RGB" )
return im
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
_UpperCAmelCase =ViTConfig(image_size=384 , qkv_bias=_lowerCamelCase )
_UpperCAmelCase =TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_UpperCAmelCase =768
elif "large" in checkpoint_url:
# use ViT-large encoder
_UpperCAmelCase =1024
_UpperCAmelCase =4096
_UpperCAmelCase =24
_UpperCAmelCase =16
_UpperCAmelCase =1024
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_UpperCAmelCase =False
_UpperCAmelCase ="relu"
_UpperCAmelCase =1024
_UpperCAmelCase =True
_UpperCAmelCase =False
_UpperCAmelCase =False
# load HuggingFace model
_UpperCAmelCase =ViTModel(_lowerCamelCase , add_pooling_layer=_lowerCamelCase )
_UpperCAmelCase =TrOCRForCausalLM(_lowerCamelCase )
_UpperCAmelCase =VisionEncoderDecoderModel(encoder=_lowerCamelCase , decoder=_lowerCamelCase )
model.eval()
# load state_dict of original model, rename some keys
_UpperCAmelCase =torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" , check_hash=_lowerCamelCase )["model"]
_UpperCAmelCase =create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_UpperCAmelCase =state_dict.pop(_lowerCamelCase )
if key.startswith("decoder" ) and "output_projection" not in key:
_UpperCAmelCase =val
else:
_UpperCAmelCase =val
# load state dict
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image
_UpperCAmelCase =ViTImageProcessor(size=encoder_config.image_size )
_UpperCAmelCase =RobertaTokenizer.from_pretrained("roberta-large" )
_UpperCAmelCase =TrOCRProcessor(_lowerCamelCase , _lowerCamelCase )
_UpperCAmelCase =processor(images=prepare_img(_lowerCamelCase ) , return_tensors="pt" ).pixel_values
# verify logits
_UpperCAmelCase =torch.tensor([[model.config.decoder.decoder_start_token_id]] )
_UpperCAmelCase =model(pixel_values=_lowerCamelCase , decoder_input_ids=_lowerCamelCase )
_UpperCAmelCase =outputs.logits
_UpperCAmelCase =torch.Size([1, 1, 5_0265] )
if "trocr-base-handwritten" in checkpoint_url:
_UpperCAmelCase =torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
_UpperCAmelCase =torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
_UpperCAmelCase =torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
_UpperCAmelCase =torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _lowerCamelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
snake_case__ : Any = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
snake_case__ : Any = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
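# Added usage sketch (the script filename and output path are assumptions; the
# checkpoint URL is the parser's own default above):
# python convert_trocr_checkpoint.py \
#   --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#   --pytorch_dump_folder_path ./trocr-base-handwritten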
| 408
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A = logging.get_logger(__name__)
A = {"""vocab_file""": """spiece.model"""}
A = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
A = {"""bert_for_seq_generation""": 512}
class a__ ( __magic_name__ ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = []
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ):
"""simple docstring"""
__UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__UpperCAmelCase : Dict = vocab_file
__UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(UpperCamelCase_)
@property
def a_ ( self : List[str]):
"""simple docstring"""
return self.sp_model.get_piece_size()
def a_ ( self : Union[str, Any]):
"""simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : int):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.__dict__.copy()
__UpperCAmelCase : List[Any] = None
return state
def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def a_ ( self : Any , UpperCamelCase_ : str):
"""simple docstring"""
        return self.sp_model.encode(UpperCamelCase_ , out_type=str)
def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
return self.sp_model.piece_to_id(UpperCamelCase_)
def a_ ( self : Tuple , UpperCamelCase_ : int):
"""simple docstring"""
__UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_)
return token
def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : int = []
__UpperCAmelCase : Tuple = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase_) + token
__UpperCAmelCase : List[Any] = []
else:
current_sub_tokens.append(UpperCamelCase_)
out_string += self.sp_model.decode(UpperCamelCase_)
return out_string.strip()
def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__UpperCAmelCase : Tuple = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCamelCase_ , "wb") as fi:
__UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_)
return (out_vocab_file,)
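# Added usage sketch (not from the source; `a__` is the mangled tokenizer class
# above, and the checkpoint name comes from its pretrained-vocab map):
# tok = a__.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
# ids = tok.encode("hello world")   # SentencePiece sub-word ids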
| 77
| 0
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected' , [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards ( kwargs , expected ):
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected' , [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
] , )
def test_split_gen_kwargs ( gen_kwargs , max_num_jobs , expected ):
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected' , [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs ( gen_kwargs , expected ):
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
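# Added worked example (taken from the parametrize cases above):
# _distribute_shards(num_shards=10, max_num_jobs=3) returns
# [range(0, 4), range(4, 7), range(7, 10)] -- the 10 shards are split across
# 3 jobs whose sizes differ by at most one.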
| 709
|
import math
import sys
def read_file_binary ( file_path ):
    result = ''
    try:
        with open(file_path , 'rb' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F'''{dat:08b}'''
            result += curr_byte
        return result
    except OSError:
        print('File not accessible' )
        sys.exit()
def decompress_data ( data_bits ):
    lexicon = {'0': '0', '1': '1'}
    result , curr_string = '', ''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex['0' + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def write_file_binary ( file_path , to_write ):
    byte_length = 8
    try:
        with open(file_path , 'wb' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('10000000' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='big' ) )
    except OSError:
        print('File not accessible' )
        sys.exit()
def remove_prefix ( data_bits ):
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress ( source_path , destination_path ):
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
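# Added usage sketch (file names are assumptions): decode a Lempel-Ziv
# compressed file produced by the matching compressor:
# python lempel_ziv_decompress.py compressed.bin restored.bin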
| 642
| 0
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = StableUnCLIPPipeline
UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase = False
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = 32
_UpperCamelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
_UpperCamelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
_UpperCamelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
_UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
_UpperCamelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL()
_UpperCamelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Dict=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        _UpperCamelCase = pipe('''anime turtle''' , generator=_A , output_type='''np''' )
_UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
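# --- Added usage sketch (mirrors the slow tests above; the checkpoint and the
# memory-saving calls are taken from them, with float16 spelled normally):
# pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
# pipe.enable_attention_slicing()
# pipe.enable_sequential_cpu_offload()
# image = pipe("anime turtle", output_type="np").images[0]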
| 10
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase = logging.getLogger(__name__)
def simple_accuracy ( preds , labels ):
    return (preds == labels).mean()
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed
set_seed(training_args.seed )
try:
_UpperCamelCase = processors[data_args.task_name]()
_UpperCamelCase = processor.get_labels()
_UpperCamelCase = len(__snake_case )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , )
# Get datasets
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__snake_case ) -> Dict:
_UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__snake_case , p.label_ids )}
# Data collator
_UpperCamelCase = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_UpperCamelCase = Trainer(
model=__snake_case , args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , compute_metrics=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(__snake_case , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __snake_case , __snake_case )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__snake_case )
return results
def _snake_case ( __snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
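# Added usage sketch (the task name and paths are assumptions; any key of the
# `processors` registry works for --task_name):
# python run_multiple_choice.py \
#   --model_name_or_path bert-base-uncased --task_name swag \
#   --data_dir ./data/swag --output_dir ./out --do_train --do_eval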
| 10
| 1
|
"""simple docstring"""
from timeit import timeit
a : str = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->bool:
UpperCAmelCase__ = 0
UpperCAmelCase__ = len(_SCREAMING_SNAKE_CASE ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->bool:
UpperCAmelCase__ = len(_SCREAMING_SNAKE_CASE ) // 2
UpperCAmelCase__ = len(_SCREAMING_SNAKE_CASE )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(_SCREAMING_SNAKE_CASE ) )
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->bool:
if len(_SCREAMING_SNAKE_CASE ) <= 2:
return True
if s[0] == s[len(_SCREAMING_SNAKE_CASE ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->bool:
return s == s[::-1]
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->None:
UpperCAmelCase__ = F'''all({name}(key) is value for key, value in test_data.items())'''
UpperCAmelCase__ = F'''from __main__ import test_data, {name}'''
UpperCAmelCase__ = 5_0_0_0_0_0
UpperCAmelCase__ = timeit(stmt=_SCREAMING_SNAKE_CASE , setup=_SCREAMING_SNAKE_CASE , number=_SCREAMING_SNAKE_CASE )
print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'''{key:21} {value}''')
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
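# Added note (explains the ranking above): the slice variant wins because
# s[::-1] and the equality check both run in C inside CPython, while the other
# variants pay Python-level per-character loop, call, or recursion overhead.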
| 422
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class _UpperCamelCase ( __UpperCamelCase ):
'''simple docstring'''
__lowercase : torch.FloatTensor
class _UpperCamelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowercase = 3 , __lowercase = 3 , __lowercase = ("DownEncoderBlock2D",) , __lowercase = ("UpDecoderBlock2D",) , __lowercase = (64,) , __lowercase = 1 , __lowercase = "silu" , __lowercase = 3 , __lowercase = 32 , __lowercase = 256 , __lowercase = 32 , __lowercase = None , __lowercase = 0.18_215 , __lowercase = "group" , ):
super().__init__()
# pass init params to Encoder
UpperCAmelCase__ = Encoder(
in_channels=__lowercase , out_channels=__lowercase , down_block_types=__lowercase , block_out_channels=__lowercase , layers_per_block=__lowercase , act_fn=__lowercase , norm_num_groups=__lowercase , double_z=__lowercase , )
UpperCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
UpperCAmelCase__ = nn.Convad(__lowercase , __lowercase , 1 )
UpperCAmelCase__ = VectorQuantizer(__lowercase , __lowercase , beta=0.25 , remap=__lowercase , sane_index_shape=__lowercase )
UpperCAmelCase__ = nn.Convad(__lowercase , __lowercase , 1 )
# pass init params to Decoder
UpperCAmelCase__ = Decoder(
in_channels=__lowercase , out_channels=__lowercase , up_block_types=__lowercase , block_out_channels=__lowercase , layers_per_block=__lowercase , act_fn=__lowercase , norm_num_groups=__lowercase , norm_type=__lowercase , )
@apply_forward_hook
def A__ ( self , __lowercase , __lowercase = True ):
UpperCAmelCase__ = self.encoder(__lowercase )
UpperCAmelCase__ = self.quant_conv(__lowercase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__lowercase )
@apply_forward_hook
def A__ ( self , __lowercase , __lowercase = False , __lowercase = True ):
# also go through quantization layer
if not force_not_quantize:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.quantize(__lowercase )
else:
UpperCAmelCase__ = h
UpperCAmelCase__ = self.post_quant_conv(__lowercase )
UpperCAmelCase__ = self.decoder(__lowercase , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowercase )
def A__ ( self , __lowercase , __lowercase = True ):
UpperCAmelCase__ = sample
UpperCAmelCase__ = self.encode(__lowercase ).latents
UpperCAmelCase__ = self.decode(__lowercase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowercase )
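# Added usage sketch (not from the source; shapes are assumptions, and the
# second mangled `_UpperCamelCase` class above is the VQ autoencoder upstream
# known as VQModel, whose forward runs encode -> quantize -> decode):
# import torch
# vq = _UpperCamelCase()                  # default config
# x = torch.randn(1, 3, 32, 32)
# reconstruction = vq(x).sample           # round-trip through the codebook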
| 422
| 1
|
'''simple docstring'''
from math import factorial
class __A :
'''simple docstring'''
def __init__(self , A , A ) -> str:
"""simple docstring"""
_a = real
if isinstance(A , A ):
_a = [1] * rank
else:
_a = rank
def __repr__(self ) -> List[Any]:
"""simple docstring"""
return (
f'''{self.real}+'''
f'''{'+'.join(str(A )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}'''
)
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , A )
def __add__(self , A ) -> int:
"""simple docstring"""
if not isinstance(A , A ):
return Dual(self.real + other , self.duals )
_a = self.duals.copy()
_a = other.duals.copy()
if len(A ) > len(A ):
o_dual.extend([1] * (len(A ) - len(A )) )
elif len(A ) < len(A ):
s_dual.extend([1] * (len(A ) - len(A )) )
_a = []
for i in range(len(A ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , A )
__lowerCamelCase : Optional[Any] = __add__
def __sub__(self , A ) -> Optional[Any]:
"""simple docstring"""
return self + other * -1
def __mul__(self , A ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(A , A ):
_a = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , A )
_a = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , A )
__lowerCamelCase : Optional[Any] = __mul__
def __truediv__(self , A ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A , A ):
_a = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , A )
raise ValueError
def __floordiv__(self , A ) -> str:
"""simple docstring"""
if not isinstance(A , A ):
_a = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , A )
raise ValueError
def __pow__(self , A ) -> Optional[Any]:
"""simple docstring"""
if n < 0 or isinstance(A , A ):
raise ValueError('''power must be a positive integer''' )
if n == 0:
return 1
if n == 1:
return self
_a = self
for _ in range(n - 1 ):
x *= self
return x
def differentiate (func , position , order):
    """simple docstring"""
    if not callable(func):
        raise ValueError('''differentiate() requires a function as input for func''')
    if not isinstance(position , (float, int)):
        raise ValueError('''differentiate() requires a float as input for position''')
    if not isinstance(order , int):
        raise ValueError('''differentiate() requires an int as input for order''')
    d = Dual(position , 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f (y):
        """simple docstring"""
        return y**2 * y**4
print(differentiate(f, 9, 2))
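    # Added worked check (derived, not from the source): f(y) = y**2 * y**4 = y**6,
    # so the second derivative is 30 * y**4 and the call above should print
    # 30 * 9**4 = 196830.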
| 11
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : Tuple = """platform"""
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def snake_case__ ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , ):
if attention_mask is None:
lowerCAmelCase_: Optional[int] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCAmelCase_: Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCAmelCase_: Optional[int] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase_: str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCAmelCase_: Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _lowercase :
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=99 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=32 , lowerCamelCase__=2 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=0.0_2 , ):
lowerCAmelCase_: Union[str, Any] = parent
lowerCAmelCase_: Tuple = batch_size
lowerCAmelCase_: Any = seq_length
lowerCAmelCase_: Tuple = is_training
lowerCAmelCase_: Optional[int] = use_labels
lowerCAmelCase_: List[Any] = vocab_size
lowerCAmelCase_: str = hidden_size
lowerCAmelCase_: Union[str, Any] = num_hidden_layers
lowerCAmelCase_: List[str] = num_attention_heads
lowerCAmelCase_: Dict = intermediate_size
lowerCAmelCase_: int = hidden_act
lowerCAmelCase_: Any = hidden_dropout_prob
lowerCAmelCase_: str = attention_probs_dropout_prob
lowerCAmelCase_: Union[str, Any] = max_position_embeddings
lowerCAmelCase_: Any = eos_token_id
lowerCAmelCase_: Union[str, Any] = pad_token_id
lowerCAmelCase_: Union[str, Any] = bos_token_id
lowerCAmelCase_: Optional[int] = initializer_range
def _a ( self ):
lowerCAmelCase_: List[str] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
lowerCAmelCase_: int = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
lowerCAmelCase_: Optional[Any] = shift_tokens_right(lowerCamelCase__ , 1 , 2 )
lowerCAmelCase_: Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCamelCase__ , )
lowerCAmelCase_: Optional[Any] = prepare_blenderbot_inputs_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return config, inputs_dict
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_: Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def _a ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase_: Optional[Any] = 20
lowerCAmelCase_: int = model_class_name(lowerCamelCase__ )
lowerCAmelCase_: Any = model.encode(inputs_dict["input_ids"] )
lowerCAmelCase_ , lowerCAmelCase_: Optional[int] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCAmelCase_: List[str] = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase_: Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
lowerCAmelCase_: Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase_: Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
lowerCAmelCase_: Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
lowerCAmelCase_: Optional[int] = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase__ , )
lowerCAmelCase_: Any = model.decode(lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase_: str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def _a ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase_: str = 20
lowerCAmelCase_: Union[str, Any] = model_class_name(lowerCamelCase__ )
lowerCAmelCase_: Tuple = model.encode(inputs_dict["input_ids"] )
lowerCAmelCase_ , lowerCAmelCase_: List[str] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCAmelCase_: int = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase_: Any = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase_: List[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase_: Tuple = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
lowerCAmelCase_: str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
lowerCAmelCase_: Tuple = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
lowerCAmelCase_: Union[str, Any] = model.decode(lowerCamelCase__ , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ )
lowerCAmelCase_: Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE: Optional[Any] = 99
def _a ( self ):
lowerCAmelCase_: Optional[int] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowerCAmelCase_: Optional[int] = input_ids.shape[0]
lowerCAmelCase_: str = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_: List[Any] = self._get_config_and_data()
lowerCAmelCase_: Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase__ )
lowerCAmelCase_: Any = lm_model(input_ids=lowerCamelCase__ )
lowerCAmelCase_: List[str] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowerCamelCase__ )
def _a ( self ):
lowerCAmelCase_: Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowerCAmelCase_: Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase__ )
lowerCAmelCase_: str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
lowerCAmelCase_: str = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
lowerCAmelCase_: Any = lm_model(input_ids=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
lowerCAmelCase_: List[str] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowerCamelCase__ )
def _a ( self ):
lowerCAmelCase_: Tuple = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
lowerCAmelCase_: Optional[Any] = shift_tokens_right(lowerCamelCase__ , 1 , 2 )
lowerCAmelCase_: List[Any] = np.equal(lowerCamelCase__ , 1 ).astype(np.floataa ).sum()
lowerCAmelCase_: Any = np.equal(lowerCamelCase__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCamelCase__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class _lowercase ( UpperCAmelCase__ , unittest.TestCase , UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE: List[Any] = True
SCREAMING_SNAKE_CASE: Optional[Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE: Optional[Any] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def _a ( self ):
lowerCAmelCase_: Optional[int] = FlaxBlenderbotSmallModelTester(self )
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase_: Dict = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase_: Union[str, Any] = model_class(lowerCamelCase__ )
@jax.jit
def encode_jitted(lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
return model.encode(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
with self.subTest("JIT Enabled" ):
lowerCAmelCase_: Optional[int] = encode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowerCAmelCase_: int = encode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 613
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
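# Usage sketch (hedged: assumes the matching `transformers` model classes are importable):
#
#     from transformers import BioGptConfig, BioGptModel
#     config = BioGptConfig()          # defaults mirror microsoft/biogpt
#     model = BioGptModel(config)      # randomly initialized weights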
| 273
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
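    # For reference: "linear" RoPE scaling divides the position index by the scaling
    # factor before the rotary angles are computed, while "dynamic" scaling rescales
    # the rotary base only once the sequence exceeds the trained context length.
    # A minimal sketch of the linear variant (assuming the usual RoPE parameterization):
    #
    #     inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    #     t = torch.arange(seq_len).float() / scaling_factor  # <- the only change
    #     freqs = torch.outer(t, inv_freq)                    # angles for the cos/sin tables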
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_chat_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = (
            "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the"
            " universe and 2) the passage of time and the length of objects can vary depending on the observer's"
            ' frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere,'
            ' is known as the "princi'
        )
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 273
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 309
|
'''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """
    This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently process
    a `scores` input tensor. It inherits from list and adds a `__call__` method to apply each processor in turn.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    r"""
    [`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution).
    """

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    r"""
    [`FlaxLogitsWarper`] that performs top-p (nucleus) filtering: keeps the smallest set of tokens whose cumulative
    probability exceeds `top_p`.
    """

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
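# Usage sketch (hypothetical values), chaining warpers through FlaxLogitsProcessorList:
#
#     processors = FlaxLogitsProcessorList(
#         [FlaxTemperatureLogitsWarper(0.7), FlaxTopPLogitsWarper(top_p=0.9)]
#     )
#     scores = processors(input_ids, scores, cur_len)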
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    r"""
    [`FlaxLogitsWarper`] that performs top-k filtering: restricts sampling to the k highest-probability tokens.
    """

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that enforces the specified token as the first generated token.
    """

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is
    reached.
    """

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] enforcing a min-length by setting the EOS probability to 0.
    """

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] suppressing a list of tokens as soon as generation starts (at `begin_index`).
    """

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step.
    """

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that takes a map of generation indices to forced token ids and, at those indices, forces
    the corresponding token by setting all other scores to `-inf`.
    """

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
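# Note: using `lax.cond` (rather than a Python `if`) keeps this processor traceable
# under `jax.jit`, since `cur_len` is a traced value at generation time. A quick
# sketch of the intended behaviour (hypothetical indices and token ids):
#
#     processor = FlaxForceTokensLogitsProcessor({0: 50258, 1: 50359})
#     scores = processor(input_ids, scores, cur_len=0)  # only token 50258 stays finite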
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that modifies the logits for the generation of timestamps in Whisper transcription.
    """

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
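# In short, the timestamp rules implemented above are: (1) a timestamp token must be
# followed either by a non-timestamp token or by EOS, depending on whether the
# previous-but-one token was also a timestamp; (2) the first sampled timestamp may not
# exceed `max_initial_timestamp_index`; and (3) if the total probability mass on
# timestamp tokens exceeds that of any single text token, text tokens are suppressed.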
| 309
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 272
|
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
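# Usage sketch (hedged: assumes the standard `datasets` entry point; the file path is
# hypothetical):
#
#     from datasets import load_dataset
#     ds = load_dataset("json", data_files={"train": "train.jsonl"})
#     # or, for a single JSON object with records nested under a key:
#     ds = load_dataset("json", data_files="data.json", field="records")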
| 272
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
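# The "*" in the mapped keys above is a placeholder for the encoder layer index: for a
# fairseq weight such as "encoder.layers.3.self_attn.linear_q.weight", the loader below
# extracts the "3" and substitutes it into the mapped key. A minimal sketch of that
# substitution (hypothetical weight name, mirroring the logic in `recursively_load_weights`):
#
#     name = "encoder.layers.3.self_attn.linear_q.weight"
#     key = "self_attn.linear_q"
#     layer_index = name.split(key)[0].split(".")[-2]          # -> "3"
#     mapped = "encoder.layers.*.self_attn.linear_q".replace("*", layer_index)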
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one feature-extractor conv/layer-norm tensor after validating its shape."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
A = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
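# Usage sketch (script name and paths are illustrative placeholders, not real files):
#
#   python this_conversion_script.py \
#       --checkpoint_path /path/to/wav2vec2_conformer.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./converted_model
#
# Pass --not_finetuned (and omit --dict_path) to convert a pretraining-only checkpoint.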
| 77
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 77
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs=None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
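# Usage sketch (assumes Hub access; "facebook/xglm-564M" is the checkpoint referenced
# in the pretrained maps above):
#
#   tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   ids = tokenizer("Hello world")["input_ids"]
#   text = tokenizer.decode(ids)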
| 220
|
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
        NOTE: Perplexity can only be calculated for causal language models.
        This includes models such as gpt2, causal variations of bert,
        causal versions of t5, and more (the full list can be found
        in the AutoModelForCausalLM documentation here:
        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )

    input_texts (list of str): input text, each separate text snippet
        is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        78.22
        >>> print(round(results["perplexities"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = datasets.load_dataset("wikitext",
        ...                                     "wikitext-2-raw-v1",
        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        60.35
        >>> print(round(results["perplexities"][0], 2))
        81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True,
        ).to(device)
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 220
| 1
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
A__ = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = A__  # the large literal above, kept verbatim
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 104
|
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embedding_groups=16, conv_pos_kernel_size=19, num_conv_pos_embeddings=5, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
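# Usage sketch: the default configuration needs no checkpoint, e.g.
#
#   config = Data2VecAudioConfig()
#   assert config.num_feat_extract_layers == 7
#   assert config.inputs_to_logits_ratio == 320  # product of the conv strides above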
| 104
| 1
|
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer built on keras-nlp's BytePairTokenizer."""

    def __init__(self, vocab, merges, max_length=None, pad_token_id=None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length=None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 302
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=None, scaling="mean", num_dynamic_real_features=0, num_static_real_features=0, num_static_categorical_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, d_model=64, encoder_ffn_dim=32, decoder_ffn_dim=32, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, is_encoder_decoder=True, activation_function="gelu", dropout=0.05, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, attention_type="prob", sampling_factor=5, distil=True, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
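# Usage sketch (illustrative values, not from a released checkpoint):
#
#   config = InformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#   # config.feature_size is derived from input_size, the lags sequence and the
#   # static/dynamic features via _number_of_features above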
| 302
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDM3DPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDM3DPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = ldmad_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldmad_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDM3DPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldmad_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 320
|
'''simple docstring'''
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()
    class_regex = f"class {class_name}("
    func_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(func_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
A = parser.parse_args()
main(args.correct_filename, args.fail_filename)
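# Usage sketch: each line of --correct_filename has the form
#   <file>;<class name>;<test name>;<corrected line>
# matching the `line.split(";")` parsing in main() above; --fail_filename optionally
# restricts the overwrite to tests listed as "<file>::<class name>::<test name>".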
| 320
| 1
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 702
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400,
                 do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5],
                 image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
A_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , """image_mean""" ) )
self.assertTrue(hasattr(a__ , """image_std""" ) )
self.assertTrue(hasattr(a__ , """do_normalize""" ) )
self.assertTrue(hasattr(a__ , """do_resize""" ) )
self.assertTrue(hasattr(a__ , """do_rescale""" ) )
self.assertTrue(hasattr(a__ , """do_pad""" ) )
self.assertTrue(hasattr(a__ , """size""" ) )
def _lowerCamelCase ( self ):
A_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , a__ )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
A_ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Optional[int] = self.image_processor_tester.get_expected_values(a__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : Optional[Any] = self.image_processor_tester.get_expected_values(a__ , batched=a__ )
A_ : Optional[int] = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self ):
# Initialize image_processing
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(a__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Optional[int] = image_processing(a__ , return_tensors="""pt""" ).pixel_values
A_ , A_ : Union[str, Any] = self.image_processor_tester.get_expected_values(a__ , batched=a__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self ):
# Initialize image_processing
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
A_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : str = self.image_processor_tester.get_expected_values(a__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : str = image_processing(a__ , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[Any] = self.image_processor_tester.get_expected_values(a__ , batched=a__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowerCamelCase ( self ):
# prepare image and target
A_ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A_ : int = json.loads(f.read() )
A_ : Union[str, Any] = {"""image_id""": 39769, """annotations""": target}
# encode them
A_ : str = DetaImageProcessor()
A_ : int = image_processing(images=a__ , annotations=a__ , return_tensors="""pt""" )
# verify pixel values
A_ : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , a__ )
A_ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , a__ , atol=1E-4 ) )
# verify area
A_ : int = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , a__ ) )
# verify boxes
A_ : str = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , a__ )
A_ : List[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , a__ , atol=1E-3 ) )
# verify image_id
A_ : Optional[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , a__ ) )
# verify is_crowd
A_ : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , a__ ) )
# verify class_labels
A_ : Optional[int] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , a__ ) )
# verify orig_size
A_ : Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , a__ ) )
# verify size
A_ : Tuple = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , a__ ) )
@slow
def _lowerCamelCase ( self ):
# prepare image, target and masks_path
A_ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A_ : Any = json.loads(f.read() )
A_ : Any = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
A_ : Tuple = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A_ : str = DetaImageProcessor(format="""coco_panoptic""" )
A_ : Tuple = image_processing(images=a__ , annotations=a__ , masks_path=a__ , return_tensors="""pt""" )
# verify pixel values
A_ : str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , a__ )
A_ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , a__ , atol=1E-4 ) )
# verify area
A_ : Dict = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , a__ ) )
# verify boxes
A_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , a__ )
A_ : List[str] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , a__ , atol=1E-3 ) )
# verify image_id
A_ : int = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , a__ ) )
# verify is_crowd
A_ : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , a__ ) )
# verify class_labels
A_ : Union[str, Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , a__ ) )
# verify masks
A_ : Tuple = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , a__ )
# verify orig_size
A_ : Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , a__ ) )
# verify size
A_ : Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , a__ ) )
| 481
| 0
|
def binary_recursive(decimal: int) -> str:
    """Convert a non-negative integer to its binary representation, recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input, handle the sign, and prefix the result with 0b."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
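For example (values worked out by hand against the functions above): 30 halves as divmod(30, 2) -> (15, 0), then (7, 1), (3, 1), (1, 1), assembling "11110" from the remainders:

assert binary_recursive(30) == "11110"
assert main("-30") == "-0b11110"
assert main(" 7 ") == "0b111"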
| 266
|
import operator as op
lowerCamelCase = """scaler.pt"""
lowerCamelCase = """pytorch_model"""
lowerCamelCase = """random_states"""
lowerCamelCase = """optimizer"""
lowerCamelCase = """scheduler"""
lowerCamelCase = """pytorch_model.bin"""
lowerCamelCase = """pytorch_model.bin.index.json"""
lowerCamelCase = """model.safetensors"""
lowerCamelCase = """model.safetensors.index.json"""
lowerCamelCase = """1.10.2"""
lowerCamelCase = """py38"""
lowerCamelCase = """4.17.0"""
lowerCamelCase = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
lowerCamelCase = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
lowerCamelCase = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
lowerCamelCase = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
lowerCamelCase = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
lowerCamelCase = """2.0.1"""
lowerCamelCase = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
lowerCamelCase = ["""default""", """reduce-overhead""", """max-autotune"""]
lowerCamelCase = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowerCamelCase = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
lowerCamelCase = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
lowerCamelCase = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 191
| 0
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowercase :Union[str, Any] = logging.get_logger(__name__)
__lowercase :Any = {'vocab_file': 'vocab.txt'}
__lowercase :str = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
__lowercase :Optional[int] = {
'openbmb/cpm-ant-10b': 1_024,
}
def UpperCAmelCase ( _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = collections.OrderedDict()
with open(_lowercase , "r" , encoding="utf-8" ) as reader:
SCREAMING_SNAKE_CASE__ : List[str] = reader.readlines()
for index, token in enumerate(_lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = token.rstrip("\n" )
SCREAMING_SNAKE_CASE__ : Optional[int] = index
return vocab
class WordpieceTokenizer(object):
"""simple docstring"""
def __init__( self : Tuple , a : Optional[Any] , a : Union[str, Any]="<unk>" , a : List[Any]=2_00 ) ->str:
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab
SCREAMING_SNAKE_CASE__ : str = unk_token
SCREAMING_SNAKE_CASE__ : Any = max_input_chars_per_word
def A_ ( self : Any , a : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] = list(UpperCamelCase__ )
if len(UpperCamelCase__ ) > self.max_input_chars_per_word:
return [self.unk_token]
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
while start < len(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any = None
while start < end:
SCREAMING_SNAKE_CASE__ : int = ''''''.join(chars[start:end] )
if substr in self.vocab:
SCREAMING_SNAKE_CASE__ : List[Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] = end
return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = False
def __init__( self : List[Any] , a : Optional[int] , a : Union[str, Any]="<d>" , a : Union[str, Any]="</d>" , a : Dict="<s>" , a : int="</s>" , a : Optional[Any]="<pad>" , a : Union[str, Any]="<unk>" , a : int="</n>" , a : List[str]="</_>" , a : Tuple="left" , **a : List[str] , ) ->int:
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=UpperCamelCase__ , eod_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , line_token=UpperCamelCase__ , space_token=UpperCamelCase__ , padding_side=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE__ : List[str] = bod_token
SCREAMING_SNAKE_CASE__ : Optional[int] = eod_token
SCREAMING_SNAKE_CASE__ : Dict = load_vocab(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any = self.encoder[space_token]
SCREAMING_SNAKE_CASE__ : Any = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
SCREAMING_SNAKE_CASE__ : str = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ : Optional[int] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def A_ ( self : Union[str, Any] ) ->Dict:
return self.encoder[self.bod_token]
@property
def A_ ( self : Tuple ) ->Optional[Any]:
return self.encoder[self.eod_token]
@property
def A_ ( self : Optional[int] ) ->List[Any]:
return self.encoder["\n"]
@property
def A_ ( self : Optional[Any] ) ->int:
return len(self.encoder )
def A_ ( self : int ) ->int:
return dict(self.encoder , **self.added_tokens_encoder )
def A_ ( self : Optional[int] , a : List[str] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : str = []
for x in jieba.cut(UpperCamelCase__ , cut_all=UpperCamelCase__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(UpperCamelCase__ ) )
return output_tokens
def A_ ( self : List[Any] , a : Union[str, Any] , **a : Dict ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [i for i in token_ids if i >= 0]
SCREAMING_SNAKE_CASE__ : int = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(UpperCamelCase__ , **UpperCamelCase__ )
def A_ ( self : Dict , a : Any ) ->List[Any]:
return token in self.encoder
def A_ ( self : int , a : List[str] ) ->Tuple:
return "".join(UpperCamelCase__ )
def A_ ( self : Any , a : Optional[Any] ) ->Any:
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def A_ ( self : str , a : str ) ->List[Any]:
return self.decoder.get(UpperCamelCase__ , self.unk_token )
def A_ ( self : int , a : str , a : Optional[str] = None ) ->Optional[Any]:
if os.path.isdir(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
SCREAMING_SNAKE_CASE__ : int = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
SCREAMING_SNAKE_CASE__ : List[Any] = 0
if " " in self.encoder:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
SCREAMING_SNAKE_CASE__ : str = self.encoder['''\n''']
del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def A_ ( self : Union[str, Any] , a : List[int] , a : List[int] = None ) ->str:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def A_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None , a : bool = False ) ->Union[str, Any]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ ))
return [1] + ([0] * len(UpperCamelCase__ ))
| 711
|
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Divide and conquer: the maximum of nums[left:right + 1] is the larger of
    the maxima of its two halves.
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
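A quick usage check (worked out by hand): the range [0, 3] splits at mid = 1 into [2, 8] and [3, 6], whose sub-maxima 8 and 6 combine to 8:

assert find_max([2, 8, 3, 6], 0, 3) == 8
assert find_max([-5], 0, 0) == -5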
| 26
| 0
|
import math
def is_prime(number: int) -> bool:
    """Check whether a number is prime, in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Return the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below `ratio` (Project Euler 58).
    """
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
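As a hand-checked example of the loop above: with ratio 0.5 the corner primes accumulate 3 -> 5 -> 8 -> 9 -> 10 against diagonal counts 5, 9, 13, 17, 21, and the loop exits once 10/21 drops below 0.5, at side length 11:

assert solution(0.5) == 11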
| 191
|
import base64


def base32_encode(string: str) -> bytes:
    """Encode a string to base32, returning a bytes-like object."""
    return base64.b32encode(string.encode("utf-8"))


def base32_decode(encoded_bytes: bytes) -> str:
    """Decode a base32 bytes-like object back to a string."""
    return base64.b32decode(encoded_bytes).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base32_encode(test)
    print(encoded)
    decoded = base32_decode(encoded)
    print(decoded)
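Since base64.b32decode inverts base64.b32encode, a round trip through the two helpers is the natural smoke test:

assert base32_decode(base32_encode("Hello World!")) == "Hello World!"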
| 614
| 0
|
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Compute u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
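A non-interactive sketch of the same forward-difference computation (the helper name newton_forward is mine, and the quadratic sample data is illustrative, not from the script above):

def newton_forward(x: list[int], y_values: list[float], value: float) -> float:
    # Build the forward-difference table column by column, exactly as main() does.
    n = len(x)
    y = [[0.0] * n for _ in range(n)]
    for i in range(n):
        y[i][0] = y_values[i]
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    return summ

# f(x) = x**2 sampled at x = 0..3; the degree-3 interpolant recovers 1.5**2 exactly:
assert abs(newton_forward([0, 1, 2, 3], [0.0, 1.0, 4.0, 9.0], 1.5) - 2.25) < 1e-9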
| 143
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
def __init__( self : Dict , lowercase__ : Optional[Any] , lowercase__ : Optional[Any]=1_3 , lowercase__ : List[Any]=3_2 , lowercase__ : List[str]=2 , lowercase__ : int=3 , lowercase__ : Optional[int]=6_4_0 , lowercase__ : Dict=4 , lowercase__ : Optional[int]="silu" , lowercase__ : Any=3 , lowercase__ : Optional[Any]=3_2 , lowercase__ : Optional[Any]=0.1 , lowercase__ : Tuple=0.1 , lowercase__ : Optional[Any]=0.1 , lowercase__ : Dict=0.02 , lowercase__ : Dict=True , lowercase__ : str=True , lowercase__ : Any=1_0 , lowercase__ : Union[str, Any]=None , ):
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = last_hidden_size
a_ = num_attention_heads
a_ = hidden_act
a_ = conv_kernel_size
a_ = output_stride
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = classifier_dropout_prob
a_ = use_labels
a_ = is_training
a_ = num_labels
a_ = initializer_range
a_ = scope
def __magic_name__ ( self : str ):
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.num_labels )
a_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : int ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Dict , lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : str ):
a_ = MobileViTModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ ( self : Optional[Any] , lowercase__ : List[Any] , lowercase__ : int , lowercase__ : Union[str, Any] , lowercase__ : Dict ):
a_ = self.num_labels
a_ = MobileViTForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Dict , lowercase__ : Any , lowercase__ : Optional[int] , lowercase__ : List[str] , lowercase__ : List[str] ):
a_ = self.num_labels
a_ = MobileViTForSemanticSegmentation(lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
a_ = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ ( self : int ):
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ , a_ = config_and_inputs
a_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __magic_name__ ( self : str ):
a_ = MobileViTModelTester(self )
a_ = MobileViTConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ )
def __magic_name__ ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
def __magic_name__ ( self : Dict ):
pass
@unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
def __magic_name__ ( self : Optional[Any] ):
pass
@unittest.skip(reason='''MobileViT does not output attentions''' )
def __magic_name__ ( self : Any ):
pass
def __magic_name__ ( self : Tuple ):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(lowercase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __magic_name__ ( self : Optional[Any] ):
pass
def __magic_name__ ( self : List[Any] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def __magic_name__ ( self : Union[str, Any] ):
def check_hidden_states_output(lowercase__ : List[str] , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ):
a_ = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
a_ = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
a_ = outputs.hidden_states
a_ = 5
self.assertEqual(len(lowercase__ ) , lowercase__ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
a_ = 2
for i in range(len(lowercase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def __magic_name__ ( self : Any ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
def __magic_name__ ( self : Optional[int] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase__ )
@slow
def __magic_name__ ( self : Union[str, Any] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = MobileViTModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def UpperCAmelCase__ ( ):
"""simple docstring"""
a_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Optional[Any] ):
return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : str ):
a_ = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(lowercase__ )
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
a_ = model(**lowercase__ )
# verify the logits
a_ = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase__ )
a_ = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1e-4 ) )
@slow
def __magic_name__ ( self : Any ):
a_ = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
a_ = model.to(lowercase__ )
a_ = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
a_ = prepare_img()
a_ = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
a_ = model(**lowercase__ )
a_ = outputs.logits
# verify the logits
a_ = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , lowercase__ )
a_ = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=lowercase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase__ , atol=1e-4 ) )
@slow
def __magic_name__ ( self : List[str] ):
a_ = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
a_ = model.to(lowercase__ )
a_ = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
a_ = prepare_img()
a_ = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
a_ = model(**lowercase__ )
a_ = outputs.logits.detach().cpu()
a_ = image_processor.post_process_semantic_segmentation(outputs=lowercase__ , target_sizes=[(5_0, 6_0)] )
a_ = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , lowercase__ )
a_ = image_processor.post_process_semantic_segmentation(outputs=lowercase__ )
a_ = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , lowercase__ )
| 143
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_A : Optional[int] = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 361
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
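The score convention here appears to follow the Mesh-TensorFlow evaluation (hence mtf_score): the model returns a per-token mean cross-entropy, so scaling by the target length and negating recovers a total log-likelihood. Illustration with made-up numbers, not the model's actual loss:

seq_len, mean_loss = 5, 4.24  # hypothetical values
score = -(seq_len * mean_loss)  # same shape as mtf_score above -> -21.2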
| 361
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = SamImageProcessor()
__lowerCAmelCase = SamProcessor(_A )
processor.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE( self , **_A ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).image_processor
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
__lowerCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = SamProcessor(image_processor=_A )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(_A , return_tensors="np" )
__lowerCAmelCase = processor(images=_A , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = SamProcessor(image_processor=_A )
__lowerCAmelCase = [torch.ones((1, 3, 5, 5) )]
__lowerCAmelCase = [[1_7_6_4, 2_6_4_6]]
__lowerCAmelCase = [[6_8_3, 1_0_2_4]]
__lowerCAmelCase = processor.post_process_masks(_A , _A , _A )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
__lowerCAmelCase = processor.post_process_masks(
_A , torch.tensor(_A ) , torch.tensor(_A ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
__lowerCAmelCase = [np.ones((1, 3, 5, 5) )]
__lowerCAmelCase = processor.post_process_masks(_A , np.array(_A ) , np.array(_A ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
__lowerCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(_A ):
__lowerCAmelCase = processor.post_process_masks(_A , np.array(_A ) , np.array(_A ) )
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = SamImageProcessor()
__lowerCAmelCase = SamProcessor(_A )
processor.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE( self , **_A ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).image_processor
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
__lowerCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = SamProcessor(image_processor=_A )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(_A , return_tensors="np" )
__lowerCAmelCase = processor(images=_A , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = SamProcessor(image_processor=_A )
__lowerCAmelCase = [tf.ones((1, 3, 5, 5) )]
__lowerCAmelCase = [[1_7_6_4, 2_6_4_6]]
__lowerCAmelCase = [[6_8_3, 1_0_2_4]]
__lowerCAmelCase = processor.post_process_masks(_A , _A , _A , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
__lowerCAmelCase = processor.post_process_masks(
_A , tf.convert_to_tensor(_A ) , tf.convert_to_tensor(_A ) , return_tensors="tf" , )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
__lowerCAmelCase = [np.ones((1, 3, 5, 5) )]
__lowerCAmelCase = processor.post_process_masks(
_A , np.array(_A ) , np.array(_A ) , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
__lowerCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__lowerCAmelCase = processor.post_process_masks(
_A , np.array(_A ) , np.array(_A ) , return_tensors="tf" )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = SamImageProcessor()
__lowerCAmelCase = SamProcessor(_A )
processor.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE( self , **_A ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).image_processor
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = SamProcessor(image_processor=_A )
__lowerCAmelCase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
__lowerCAmelCase = [tf.convert_to_tensor(_A )]
__lowerCAmelCase = [torch.tensor(_A )]
__lowerCAmelCase = [[1_7_6_4, 2_6_4_6]]
__lowerCAmelCase = [[6_8_3, 1_0_2_4]]
__lowerCAmelCase = processor.post_process_masks(
_A , _A , _A , return_tensors="tf" )
__lowerCAmelCase = processor.post_process_masks(
_A , _A , _A , return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = SamProcessor(image_processor=_A )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(_A , return_tensors="pt" )["pixel_values"].numpy()
__lowerCAmelCase = processor(images=_A , return_tensors="pt" )["pixel_values"].numpy()
__lowerCAmelCase = image_processor(_A , return_tensors="tf" )["pixel_values"].numpy()
__lowerCAmelCase = processor(images=_A , return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(_A , _A ) )
self.assertTrue(np.allclose(_A , _A ) )
self.assertTrue(np.allclose(_A , _A ) )
| 705
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
def __init__( self , _A , _A=7 , _A=3 , _A=1_8 , _A=3_0 , _A=4_0_0 , _A=True , _A=None , _A=True , ):
"""simple docstring"""
__lowerCAmelCase = size if size is not None else {"height": 1_8, "width": 1_8}
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = image_size
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = apply_ocr
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = LayoutLMvaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , "do_resize" ) )
self.assertTrue(hasattr(_A , "size" ) )
self.assertTrue(hasattr(_A , "apply_ocr" ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} )
__lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , _A )
self.assertIsInstance(encoding.boxes , _A )
# Test batched
__lowerCAmelCase = image_processing(_A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__lowerCAmelCase = image_processing(_A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__lowerCAmelCase = image_processing(_A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowerCAmelCase = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
__lowerCAmelCase = Image.open(ds[0]["file"] ).convert("RGB" )
__lowerCAmelCase = image_processing(_A , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowerCAmelCase = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
__lowerCAmelCase = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 
5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _A )
self.assertListEqual(encoding.boxes , _A )
# with apply_OCR = False
__lowerCAmelCase = LayoutLMvaImageProcessor(apply_ocr=_A )
__lowerCAmelCase = image_processing(_A , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
| 552
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of random PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
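
# A hedged usage sketch of the processor under test (`tmpdir` and `image` are
# hypothetical stand-ins for a saved checkpoint directory and a PIL image):
#
#   tokenizer = BertTokenizer.from_pretrained(tmpdir)
#   image_processor = ChineseCLIPImageProcessor.from_pretrained(tmpdir)
#   processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
#   # keys: input_ids / token_type_ids / attention_mask from the tokenizer,
#   # plus pixel_values from the image processor.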
| 286
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
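
# A brief usage sketch (a hypothetical instantiation; `pos_att_type` accepts a
# "|"-separated string for backwards compatibility, split in __init__ above):
#
#   config = DebertaV2Config(pos_att_type="c2p|p2c")
#   print(config.pos_att_type)         # ['c2p', 'p2c']
#   onnx_config = DebertaV2OnnxConfig(config)
#   print(onnx_config.default_onnx_opset)  # 12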
| 202
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting: prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
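
# A hedged usage sketch (checkpoint from the maps above; the example sentence
# is an assumption):
#
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   model_inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria")
#   # input_ids = [en_XX lang code] + sentencepiece tokens + [</s>],
#   # per set_src_lang_special_tokens above.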
| 709
|
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10 * sqrt(5)) * edge^2."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron: (15 + 7 * sqrt(5)) / 4 * edge^3."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
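
# A quick sanity check of the formulas above; the reference values follow
# directly from the closed-form expressions (rounded for comparison):
if __name__ == "__main__":
    assert round(dodecahedron_surface_area(5), 4) == 516.1432
    assert round(dodecahedron_volume(5), 4) == 957.8899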
| 680
| 0
|
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: repeatedly emit vertices whose in-degree has dropped to zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
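
# For the sample graph above the algorithm prints [0, 1, 2, 3, 4, 5]: vertex 0
# is the only zero-in-degree start, and the FIFO queue breaks ties by insertion
# order. A sketch of the cyclic case (hypothetical input, same function):
#
#   topological_sort({0: [1], 1: [2], 2: [0]})  # prints "Cycle exists"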
| 84
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
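
# A shape sketch of `preprocess` (the 130x100 input size is a hypothetical
# example): width and height are floored to multiples of 32, so a 130x100 RGB
# image is resized to 128x96 and returned as a float tensor scaled to [-1, 1]:
#
#   img = PIL.Image.new("RGB", (130, 100))
#   tensor = preprocess(img)  # torch.Size([1, 3, 96, 128])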
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 688
| 0
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
        if provided).
        - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
        acceptable input length for the model if that argument is not provided.
        - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
        - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
        the maximum acceptable input length for the model if that argument is not provided. This will truncate
        token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
        of pairs) is provided.
        - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
        acceptable input length for the model if that argument is not provided. This will only truncate the first
        sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
        acceptable input length for the model if that argument is not provided. This will only truncate the
        second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
        - `'tf'`: Return TensorFlow `tf.constant` objects.
        - `'pt'`: Return PyTorch `torch.Tensor` objects.
        - `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
        specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[Tuple[int, int]]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 715
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 372
| 0
|