def bfs(graph, source, sink, parent):
    """Return True if there is an augmenting path from source to sink."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from source to sink in the given capacity matrix."""
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities of the edges (and reverse edges) on the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
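# For this classic CLRS example network the program prints 23, the value of the
# maximum flow from node 0 to node 5. Note that ford_fulkerson mutates `graph`
# in place, leaving the residual capacities behind.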
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # drop the weights of the projection head used only during self-supervised pre-training
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
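# Example invocation (the script filename and output path here are illustrative,
# the checkpoint URL is the script's own default):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small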
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the transmitted intensity I = I0 * cos^2(angle) (Malus's law).

    >>> malus_law(100.0, 0)
    100.0
    """
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
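# Example: at 60 degrees the transmitted intensity drops to a quarter of the
# incident intensity, since cos(60°)² = 0.25, so malus_law(100.0, 60) ≈ 25.0.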
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    r"""Cross-attention down block: resnet + transformer pairs, with an optional downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    r"""Plain down block: a stack of resnets, with an optional downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    r"""Cross-attention up block: resnet + transformer pairs fed by skip connections."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    r"""Plain up block: a stack of resnets fed by skip connections."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    r"""Middle block: alternating resnet and transformer blocks at the bottleneck resolution."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
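# Note: these Flax blocks operate on NHWC (channels-last) tensors, which is why
# the skip connections above are concatenated along axis=-1 rather than axis=1
# as in the equivalent PyTorch (NCHW) blocks.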
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
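# Importing symbols listed in _import_structure (e.g. `from transformers import
# MegatronBertModel`) is deferred by _LazyModule: the torch-backed submodule is
# only loaded on first attribute access.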
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
import argparse
import logging
from collections import namedtuple

import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer  # The authors' implementation

from transformers import BertTokenizer


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)


def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights to our implementation."""
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical.
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()

    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    """Return the shortest distance and path between source and destination on a
    binary grid (cells equal to 1 are passable)."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
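# Minimal usage sketch (values worked out by hand; 1 = passable, 0 = blocked):
#   grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
#   dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
# returns a distance of 6.0 and the path
#   [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)].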
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict


@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president ... (repeated ten times)
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why.
        # Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
"""simple docstring"""
from __future__ import annotations
def UpperCamelCase_( _snake_case : int , _snake_case : int ):
"""simple docstring"""
if b == 0:
return (1, 0)
((__a) , (__a)) =extended_euclid(_snake_case , a % b )
__a =a // b
return (y, x - k * y)
def UpperCamelCase_( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int ):
"""simple docstring"""
((__a) , (__a)) =extended_euclid(_snake_case , _snake_case )
__a =na * na
__a =ra * x * na + ra * y * na
return (n % m + m) % m
def UpperCamelCase_( _snake_case : int , _snake_case : int ):
"""simple docstring"""
((__a) , (__a)) =extended_euclid(_snake_case , _snake_case )
if b < 0:
__a =(b % n + n) % n
return b
def UpperCamelCase_( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int ):
"""simple docstring"""
__a , __a =invert_modulo(_snake_case , _snake_case ), invert_modulo(_snake_case , _snake_case )
__a =na * na
__a =ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
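# Worked examples (computed by hand from the definitions above):
#   extended_euclid(10, 6)                -> (-1, 2), since 10*(-1) + 6*2 == 2 == gcd(10, 6)
#   invert_modulo(3, 7)                   -> 5, since (3 * 5) % 7 == 1
#   chinese_remainder_theorem(5, 1, 7, 3) -> 31, since 31 % 5 == 1 and 31 % 7 == 3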
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply iteration_step the given number of times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each line segment with the four segments of the Koch construction."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of the snowflake
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
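# Each iteration step replaces every segment with 4 shorter ones, so after 5
# iterations the initial 3 segments have become 3 * 4**5 = 3072 segments
# (3073 points in the plotted vector list).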
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]

        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
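# Hypothetical usage sketch (requires network access; the checkpoint name comes
# from the pretrained map above, the input sentence is illustrative):
#   tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#   ids = tokenizer("przykładowy tekst")["input_ids"]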
from __future__ import annotations
from statistics import mean
def UpperCamelCase_( _snake_case : list[int] , _snake_case : list[int] , _snake_case : int ):
"""simple docstring"""
__a =[0] * no_of_processes
__a =[0] * no_of_processes
    # Initialize remaining_time from burst_time (each job still needs its full burst).
for i in range(_snake_case ):
__a =burst_time[i]
__a =[]
__a =0
__a =0
    # While processes remain incomplete: any process whose arrival time has
    # passed and that still has remaining execution time is put into
    # ready_process, and the shortest job in ready_process (target_process)
    # is executed next.
while completed != no_of_processes:
__a =[]
__a =-1
for i in range(_snake_case ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_snake_case )
if len(_snake_case ) > 0:
__a =ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__a =i
total_time += burst_time[target_process]
completed += 1
__a =0
__a =(
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def UpperCamelCase_( _snake_case : list[int] , _snake_case : int , _snake_case : list[int] ):
"""simple docstring"""
__a =[0] * no_of_processes
for i in range(_snake_case ):
__a =burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
_lowerCAmelCase : Union[str, Any] = 4
_lowerCAmelCase : Tuple = [2, 5, 3, 7]
_lowerCAmelCase : Union[str, Any] = [0, 0, 0, 0]
_lowerCAmelCase : Optional[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_lowerCAmelCase : Optional[Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
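# A second, hedged test sketch (illustrative values; assumes the driver names
# calculate_waitingtime / calculate_turnaroundtime used above). Staggered
# arrivals exercise the ready-queue selection, which the all-zero-arrival case
# above cannot: at t=3 both remaining jobs have arrived and the shorter burst
# (2) is scheduled before the longer one (5).
def _staggered_arrival_demo() -> None:
    waiting = calculate_waitingtime([0, 1, 2], [3, 5, 2], 3)
    turnaround = calculate_turnaroundtime([3, 5, 2], 3, waiting)
    print(f"Average waiting time = {mean(waiting):.5f}")
    print(f"Average turnaround time = {mean(turnaround):.5f}")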
| 351
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def __magic_name__ ( *__snake_case , **__snake_case ) -> List[str]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : Image ):
    """simple docstring"""
    # MD5-hash the raw image bytes so pipeline outputs can be compared by digest.
    __a =hashlib.md5(_snake_case.tobytes() )
    return __a.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> Dict:
'''simple docstring'''
__a =DepthEstimationPipeline(model=__snake_case , image_processor=__snake_case )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __magic_name__ ( self , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
__a =depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , __snake_case )
import datasets
__a =datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
__a =depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , __snake_case , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
@require_torch
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a ='Intel/dpt-large'
__a =pipeline('depth-estimation' , model=__snake_case )
__a =depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
__a =hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
        # It is highly irregular to have no small tests.
        self.skipTest('There is no hf-internal-testing tiny model for either GLPN or DPT' )
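# A minimal standalone sketch mirroring the slow test above (hedged: needs
# torch, timm and hub access; the model name comes from the test itself):
#
#   depth_estimator = pipeline('depth-estimation', model='Intel/dpt-large')
#   out = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg')
#   out['depth'].save('depth.png')       # PIL image visualisation
#   print(out['predicted_depth'].shape)  # raw torch tensor of depths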
| 308
| 0
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE = 'AutoImageProcessor'
SCREAMING_SNAKE_CASE = 'AutoTokenizer'
def __init__( self , __snake_case , __snake_case ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(__snake_case , __snake_case )
__a =self.image_processor
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case ) -> int:
'''simple docstring'''
if text is None and images is None:
            raise ValueError('You have to specify either text or images. They cannot both be None.' )
if text is not None:
__a =self.tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case )
if images is not None:
__a =self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is not None and images is not None:
__a =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__snake_case ) , tensor_type=__snake_case )
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
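# A usage sketch of the processor above (hedged: the `processor` instance is
# assumed to come from ProcessorMixin.from_pretrained on some vision-text
# checkpoint, and `image` is any PIL image). It exercises the three branches
# of __call__: text only, images only, and text plus images.
def _processor_demo(processor, image) -> None:
    text_only = processor(text='a photo of a cat', return_tensors='pt')
    image_only = processor(images=image, return_tensors='pt')
    # Text + images merges the image pixel_values into the text encoding:
    both = processor(text='a photo of a cat', images=image, return_tensors='pt')
    print(sorted(both.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']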
| 352
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
_lowerCAmelCase : Any = "pytorch_model.bin"
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The name of the task to train on.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
        default='no' , metadata={
            'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "steps", "epoch"]'
        } , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default=1_0_0 , metadata={'help': 'Maximum number of self-training iterations to run.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Random seed for initialization.'} , )
def UpperCamelCase_( _snake_case : int , _snake_case : str , _snake_case : Optional[int] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : List[Any] ):
"""simple docstring"""
__a =datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
__a =dataset.filter(lambda _snake_case : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__a =int(eval_result * len(_snake_case ) )
print(_snake_case )
__a =dataset.sort('probability' , reverse=_snake_case )
__a =dataset.select(range(_snake_case ) )
__a =dataset.remove_columns(['label', 'probability'] )
__a =dataset.rename_column('prediction' , 'label' )
__a =dataset.map(lambda _snake_case : {"label": idalabel[example["label"]]} )
__a =dataset.shuffle(seed=args.seed )
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(_snake_case , index=_snake_case )
else:
dataset.to_json(_snake_case )
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : str , _snake_case : int , _snake_case : Optional[int] , **_snake_case : List[str] ):
"""simple docstring"""
__a =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__a =STModelArguments(model_name_or_path=_snake_case )
__a =STDataArguments(train_file=_snake_case , infer_file=_snake_case )
__a =STTrainingArguments(output_dir=_snake_case )
__a =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_snake_case ).items():
setattr(_snake_case , _snake_case , _snake_case )
for key, value in kwargs.items():
if hasattr(_snake_case , _snake_case ):
setattr(_snake_case , _snake_case , _snake_case )
# Sanity checks
__a ={}
__a =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__a =args.train_file
__a =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__a =args.eval_file
for key in data_files:
__a =data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
__a =extension
else:
            assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
__a =F'{args.output_dir}/self-train_iter-{{}}'.format
__a =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_snake_case )
os.makedirs(_snake_case , exist_ok=_snake_case )
accelerator.wait_for_everyone()
__a =None
__a =None
__a =0
__a =False
# Show the progress bar
__a =tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__a =data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__a =os.path.join(_snake_case , 'stage-1' )
__a ={
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case , _snake_case ):
arguments_dict.update({key: value} )
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , _snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__a =os.path.join(_snake_case , 'best-checkpoint' )
__a =os.path.join(_snake_case , 'stage-2' )
# Update arguments_dict
__a =model_path
__a =data_files['train']
__a =current_output_dir
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , _snake_case )
__a =iteration
__a =data_dir_format(iteration + 1 )
__a =AutoConfig.from_pretrained(os.path.join(_snake_case , 'best-checkpoint' ) )
__a =config.idalabel
__a =os.path.join(_snake_case , 'eval_results_best-checkpoint.json' )
__a =os.path.join(_snake_case , 'test_results_best-checkpoint.json' )
assert os.path.exists(_snake_case )
with open(_snake_case , 'r' ) as f:
__a =float(json.load(_snake_case )[args.eval_metric] )
__a =os.path.join(_snake_case , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
__a =load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
__a =load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(_snake_case , exist_ok=_snake_case )
shutil.copy(_snake_case , os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case , os.path.join(_snake_case , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
accelerator.wait_for_everyone()
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__a =eval_result
if best_iteration is None:
__a =new_iteration
__a =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__a =new_iteration
__a =new_eval_result
__a =0
else:
if new_eval_result == best_eval_result:
__a =new_iteration
__a =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__a =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , _snake_case )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
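# An example entry-point call (hedged: in readable form this function is the
# `selftrain` driver with the signature (model_name_or_path, train_file,
# infer_file, output_dir, **kwargs); the file names below are placeholders):
#
#   selftrain(
#       'bert-base-uncased',
#       'train.csv',
#       'infer.csv',
#       'output/',
#       evaluation_strategy='steps',
#       eval_file='eval.csv',
#   )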
| 308
| 0
|
from __future__ import annotations
def UpperCamelCase_( _snake_case : float , _snake_case : float , _snake_case : float , ):
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
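# A worked example (hedged: treats the placeholder arguments above as the
# intended (stress, tangential_force, area) triple; exactly one of them must
# be 0 and is solved for). With F = 25 N over A = 5 m^2, shear stress = F / A.
def _shear_stress_demo() -> None:
    print(UpperCamelCase_(0, 25, 5))    # ('stress', 5.0): 25 / 5
    print(UpperCamelCase_(5.0, 0, 5))   # ('tangential_force', 25.0): 5 * 5
    print(UpperCamelCase_(5.0, 25, 0))  # ('area', 5.0): 25 / 5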
| 353
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
_lowerCAmelCase : List[Any] = 256_047
_lowerCAmelCase : Dict = 256_145
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = NllbTokenizer
SCREAMING_SNAKE_CASE = NllbTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = {}
def __magic_name__ ( self ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
__a =tokenizer.tokenize('This is a test' )
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__a =tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__a =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
                # Checks it saves with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_seqaseq:
return
__a =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
__a =[
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
__a =[
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , tgt_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__a =tokenizer.prepare_seqaseq_batch(
__snake_case , tgt_texts=__snake_case , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , __snake_case )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =[AddedToken('<special>' , lstrip=__snake_case )]
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_r.encode('Hey this is a <special> token' )
__a =tokenizer_r.encode('<special>' , add_special_tokens=__snake_case )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
__a =self.tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_p.encode('Hey this is a <special> token' )
__a =tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = 'facebook/nllb-200-distilled-600M'
SCREAMING_SNAKE_CASE = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    SCREAMING_SNAKE_CASE = [
        25_6047,
        1_6297,
        13_4408,
        8165,
        24_8066,
        1_4734,
        950,
        1135,
        10_5721,
        3573,
        83,
        2_7352,
        108,
        4_9486,
        2,
    ]
@classmethod
def __magic_name__ ( cls ) -> Tuple:
'''simple docstring'''
__a =NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
__a =1
return cls
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_6057 )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
self.assertIn(__snake_case , self.tokenizer.all_special_ids )
# fmt: off
__a =[RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__a =self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
__a =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , __snake_case )
__a =10
__a =self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __snake_case )
self.assertEqual(len(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_6203, 3] )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =tempfile.mkdtemp()
__a =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
__a =NllbTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__a =shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__a =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(__snake_case , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors='pt' )
__a =self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors='pt' )
__a =targets['input_ids']
__a =shift_tokens_right(
__snake_case , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(__snake_case ) , {
                # eng_Latn, A, test, EOS
'input_ids': [[25_6047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
                # fra_Latn
'forced_bos_token_id': 25_6057,
} , )
@require_torch
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =True
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
__a =False
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
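# A hedged summary of the two layouts asserted above: with legacy behaviour the
# source language code is appended after </s> (tokens ... 2, 25_6047), while
# the default NLLB layout prepends it (25_6047, tokens ..., 2). Only the
# placement of the language code differs; the text tokens are identical.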
| 308
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' )
__a =AutoTokenizer.from_pretrained('google/mt5-small' )
__a =tokenizer('Hello there' , return_tensors='tf' ).input_ids
__a =tokenizer('Hi I am' , return_tensors='tf' ).input_ids
__a =model(__snake_case , labels=__snake_case ).loss
__a =-tf.math.reduce_mean(__snake_case ).numpy()
__a =-21.22_8168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 354
|
def UpperCamelCase_( _snake_case : str , _snake_case : int ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(_snake_case ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
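# A quick, hedged illustration (treats the placeholders above as the intended
# (sentence, ngram_size) pair). Character-level n-grams of size 3 over 'abcde'
# are every contiguous length-3 slice.
def _ngram_demo() -> None:
    print(UpperCamelCase_('abcde', 3))  # ['abc', 'bcd', 'cde']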
| 308
| 0
|
from functools import lru_cache
@lru_cache
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
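# A quick check (hedged: the recursive call above looks up the global name
# `factorial`, so we alias it explicitly before calling). The lru_cache
# decorator memoizes results, so factorial(10) after factorial(5) only
# evaluates the five new frames 6..10.
factorial = UpperCamelCase_

def _factorial_demo() -> None:
    print(factorial(5))   # 120
    print(factorial(10))  # 3628800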
| 355
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __magic_name__ ( pl.LightningModule ):
def __init__( self , __snake_case ) -> List[Any]:
'''simple docstring'''
super().__init__()
__a =model
__a =2
__a =nn.Linear(self.model.config.hidden_size , self.num_labels )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : str , _snake_case : str , _snake_case : str ):
"""simple docstring"""
__a =LongformerModel.from_pretrained(_snake_case )
__a =LightningModel(_snake_case )
__a =torch.load(_snake_case , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
__a =LongformerForQuestionAnswering.from_pretrained(_snake_case )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_snake_case )
print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
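# Example invocation (hedged: the script name and checkpoint paths are
# illustrative placeholders):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/last.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa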
| 308
| 0
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCamelCase_( _snake_case : str , _snake_case : complex , _snake_case : str = "x" , _snake_case : float = 10**-10 , _snake_case : int = 1 , ):
"""simple docstring"""
__a =symbols(_snake_case )
__a =lambdify(_snake_case , _snake_case )
__a =lambdify(_snake_case , diff(_snake_case , _snake_case ) )
__a =starting_point
while True:
if diff_function(_snake_case ) != 0:
__a =prev_guess - multiplicity * func(_snake_case ) / diff_function(
_snake_case )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__a =next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}''')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f'''{newton_raphson("exp(x) - 1", 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
| 356
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
SCREAMING_SNAKE_CASE = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
SCREAMING_SNAKE_CASE = field(
default=1_0_2_4 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = field(default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the test data.'} )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
__a =self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__a =self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
SCREAMING_SNAKE_CASE = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def UpperCamelCase_( ):
"""simple docstring"""
__a =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a , __a , __a =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a , __a , __a =parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
__a =training_args.get_process_log_level()
logger.setLevel(_snake_case )
datasets.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F' distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__a =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__a =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__a ={'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__a =data_args.train_file.split('.' )[-1]
__a =data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__a =data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
__a =load_dataset('csv' , data_files=_snake_case , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__a =load_dataset('json' , data_files=_snake_case , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__a =raw_datasets['train'].features['label'].names
__a =len(_snake_case )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__a =TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_snake_case , )
__a =BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__a ='max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__a =False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__a ={'Refused': 0, 'Entailed': 1}
__a ={0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__a =min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_snake_case : Tuple ):
# Tokenize the texts
def _convert_table_text_to_pandas(_snake_case : Optional[Any] ):
__a =[_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
__a =pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__a =examples['statement']
__a =list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
__a =tokenizer(_snake_case , _snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case )
__a =examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
__a =raw_datasets.map(
_snake_case , batched=_snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__a =raw_datasets['train']
if data_args.max_train_samples is not None:
__a =train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__a =raw_datasets['validation']
if data_args.max_eval_samples is not None:
__a =eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
__a =raw_datasets['test']
if data_args.max_predict_samples is not None:
__a =predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_snake_case ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_snake_case : EvalPrediction ):
__a =p.predictions[0] if isinstance(p.predictions , _snake_case ) else p.predictions
__a =np.argmax(_snake_case , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__a =default_data_collator
elif training_args.fpaa:
__a =DataCollatorWithPadding(_snake_case , pad_to_multiple_of=8 )
else:
__a =None
# Initialize our Trainer
__a =Trainer(
model=_snake_case , args=_snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_snake_case , tokenizer=_snake_case , data_collator=_snake_case , )
# Training
if training_args.do_train:
__a =None
if training_args.resume_from_checkpoint is not None:
__a =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a =last_checkpoint
__a =trainer.train(resume_from_checkpoint=_snake_case )
__a =train_result.metrics
__a =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(_snake_case )
)
__a =min(_snake_case , len(_snake_case ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , _snake_case )
trainer.save_metrics('train' , _snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__a =trainer.evaluate(eval_dataset=_snake_case )
__a =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_snake_case )
__a =min(_snake_case , len(_snake_case ) )
trainer.log_metrics('eval' , _snake_case )
trainer.save_metrics('eval' , _snake_case )
if training_args.do_predict:
logger.info('*** Predict ***' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
__a =predict_dataset.remove_columns('label' )
__a =trainer.predict(_snake_case , metric_key_prefix='predict' ).predictions
__a =np.argmax(_snake_case , axis=1 )
__a =os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(_snake_case , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(_snake_case ):
__a =label_list[item]
writer.write(F'{index}\t{item}\n' )
__a ={'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
def UpperCamelCase_( _snake_case : Union[str, Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
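# Example invocation (hedged: the script name, checkpoint and paths are
# illustrative placeholders for this TAPEX/TabFact fine-tuning entry point):
#
#   python run_tabfact_with_tapex.py \
#       --model_name_or_path microsoft/tapex-base \
#       --do_train --do_eval \
#       --max_seq_length 1024 \
#       --per_device_train_batch_size 8 \
#       --output_dir ./tapex-tabfact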
| 308
| 0
|
_lowerCAmelCase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def UpperCamelCase_( ):
"""simple docstring"""
__a =input('Enter message: ' )
__a =input('Enter key [alphanumeric]: ' )
__a =input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
__a ='encrypt'
__a =encrypt_message(_snake_case , _snake_case )
elif mode.lower().startswith('d' ):
__a ='decrypt'
__a =decrypt_message(_snake_case , _snake_case )
print(F'\n{mode.title()}ed message:' )
print(_snake_case )
def UpperCamelCase_( _snake_case : str , _snake_case : str ):
"""simple docstring"""
return translate_message(_snake_case , _snake_case , 'encrypt' )
def UpperCamelCase_( _snake_case : str , _snake_case : str ):
"""simple docstring"""
return translate_message(_snake_case , _snake_case , 'decrypt' )
def UpperCamelCase_( _snake_case : str , _snake_case : str , _snake_case : str ):
"""simple docstring"""
__a =[]
__a =0
__a =key.upper()
for symbol in message:
__a =LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_snake_case )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_snake_case ):
__a =0
else:
translated.append(_snake_case )
return "".join(_snake_case )
if __name__ == "__main__":
main()
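# A non-interactive round-trip sketch (hedged: assumes the helpers above are
# bound to their call-site names encrypt_message / decrypt_message). Encrypting
# then decrypting with the same Vigenere key must return the original message;
# non-letter symbols pass through unchanged and do not advance the key index.
def _round_trip_demo() -> None:
    cipher = encrypt_message('HELLO', 'Attack at dawn!')
    assert decrypt_message('HELLO', cipher) == 'Attack at dawn!'
    print(cipher)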
| 357
|
from __future__ import annotations
import time
import numpy as np
_lowerCAmelCase : List[str] = [8, 5, 9, 7]
_lowerCAmelCase : List[str] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_lowerCAmelCase : List[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __magic_name__ :
def __init__( self , __snake_case , __snake_case , __snake_case , ) -> None:
'''simple docstring'''
__a =claim_vector
__a =allocated_resources_table
__a =maximum_claim_table
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __magic_name__ ( self ) -> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__snake_case ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __magic_name__ ( self ) -> dict[int, list[int]]:
'''simple docstring'''
return {self.__need().index(__snake_case ): i for i in self.__need()}
def __magic_name__ ( self , **__snake_case ) -> None:
'''simple docstring'''
__a =self.__need()
__a =self.__allocated_resources_table
__a =self.__available_resources()
__a =self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
__a =False
for each_need in need_list:
__a =True
for index, need in enumerate(__snake_case ):
if need > available_resources[index]:
__a =False
break
if execution:
__a =True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__a =original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(__snake_case )
# update available/freed resources stack
__a =np.array(__snake_case ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(__snake_case ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(__snake_case ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(__snake_case ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
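# Minimal driver sketch, assuming the class keeps its upstream name
# BankersAlgorithm and is fed the three test tables at the top of this file
# (claim vector, allocated-resources table, maximum-claim table):
#
#   BankersAlgorithm(claim_vector, allocated_resources_table,
#                    maximum_claim_table).main(describe=True)
#
# With these tables the state is safe: P3 can run first (its need fits the
# available vector [1, 2, 2, 2]), and freeing its resources unblocks the rest.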
| 308
| 0
|
"""simple docstring"""
def UpperCamelCase_( _snake_case : list , _snake_case : list , _snake_case : int , _snake_case : int , _snake_case : int ):
"""simple docstring"""
if index == number_of_items:
return 0
__a =0
__a =0
__a =knapsack(_snake_case , _snake_case , _snake_case , _snake_case , index + 1 )
if weights[index] <= max_weight:
__a =values[index] + knapsack(
_snake_case , _snake_case , _snake_case , max_weight - weights[index] , index + 1 )
return max(_snake_case , _snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
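# Worked example for the recursive 0/1 knapsack above, assuming its upstream
# signature knapsack(weights, values, number_of_items, max_weight, index):
#
#   knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)  # -> 13
#
# Capacity 5 is best filled with the weight-1 and weight-4 items (5 + 8 = 13);
# each call branches on "skip this item" vs "take this item" and returns the max.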
| 358
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_lowerCAmelCase : Tuple = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
_lowerCAmelCase : str = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
_lowerCAmelCase : str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a ={letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def UpperCamelCase_( _snake_case : tuple ):
"""simple docstring"""
return x[0]
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_letter_count(_snake_case )
__a ={
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(_snake_case )
__a ={}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_snake_case )
__a =''.join(freq_to_letter[freq] )
__a =list(freq_to_letter_str.items() )
freq_pairs.sort(key=_snake_case , reverse=_snake_case )
__a =[freq_pair[1] for freq_pair in freq_pairs]
return "".join(_snake_case )
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_frequency_order(_snake_case )
__a =0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
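# Reading of the scorer above (upstream name: english_freq_match_score): it
# counts how many of E,T,A,O,I,N land among the six most frequent letters of the
# text and how many of V,K,J,X,Q,Z among the six least frequent, so the result is
# always an integer in [0, 12]. Ordinary English prose scores near the top;
# random or enciphered text tends toward the bottom.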
| 308
| 0
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def UpperCamelCase_( _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : Union[str, Any] ):
"""simple docstring"""
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def UpperCamelCase_( _snake_case : np.ndarray , _snake_case : Optional[str] , _snake_case : Optional[str] = None ):
"""simple docstring"""
__a =tesseract_config if tesseract_config is not None else ''
# apply OCR
__a =to_pil_image(_snake_case )
__a , __a =pil_image.size
__a =pytesseract.image_to_data(_snake_case , lang=_snake_case , output_type='dict' , config=_snake_case )
__a , __a , __a , __a , __a =data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
__a =[idx for idx, word in enumerate(_snake_case ) if not word.strip()]
__a =[word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices]
__a =[coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
__a =[coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
__a =[coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
__a =[coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
__a =[]
for x, y, w, h in zip(_snake_case , _snake_case , _snake_case , _snake_case ):
__a =[x, y, x + w, y + h]
actual_boxes.append(_snake_case )
# finally, normalize the bounding boxes
__a =[]
for box in actual_boxes:
normalized_boxes.append(normalize_box(_snake_case , _snake_case , _snake_case ) )
assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = ['pixel_values']
def __init__( self , __snake_case = True , __snake_case = None , __snake_case = PILImageResampling.BILINEAR , __snake_case = True , __snake_case = None , __snake_case = "" , **__snake_case , ) -> None:
'''simple docstring'''
super().__init__(**__snake_case )
__a =size if size is not None else {'height': 224, 'width': 224}
__a =get_size_dict(__snake_case )
__a =do_resize
__a =size
__a =resample
__a =apply_ocr
__a =ocr_lang
__a =tesseract_config
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = PILImageResampling.BILINEAR , __snake_case = None , **__snake_case , ) -> np.ndarray:
'''simple docstring'''
__a =get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
__a =(size['height'], size['width'])
return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = ChannelDimension.FIRST , **__snake_case , ) -> PIL.Image.Image:
'''simple docstring'''
__a =do_resize if do_resize is not None else self.do_resize
__a =size if size is not None else self.size
__a =get_size_dict(__snake_case )
__a =resample if resample is not None else self.resample
__a =apply_ocr if apply_ocr is not None else self.apply_ocr
__a =ocr_lang if ocr_lang is not None else self.ocr_lang
__a =tesseract_config if tesseract_config is not None else self.tesseract_config
__a =make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
# All transformations expect numpy arrays.
__a =[to_numpy_array(__snake_case ) for image in images]
if apply_ocr:
requires_backends(self , 'pytesseract' )
__a =[]
__a =[]
for image in images:
__a , __a =apply_tesseract(__snake_case , __snake_case , __snake_case )
words_batch.append(__snake_case )
boxes_batch.append(__snake_case )
if do_resize:
__a =[self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
__a =[flip_channel_order(__snake_case ) for image in images]
__a =[to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
__a =BatchFeature(data={'pixel_values': images} , tensor_type=__snake_case )
if apply_ocr:
__a =words_batch
__a =boxes_batch
return data
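# Minimal usage sketch -- the upstream class name LayoutLMv2ImageProcessor is an
# assumption inferred from the apply_ocr + BGR-flip behaviour above:
#
#   processor = LayoutLMv2ImageProcessor(apply_ocr=True)
#   encoding = processor(image, return_tensors="pt")
#   # encoding["pixel_values"], plus OCR'd encoding["words"]/encoding["boxes"],
#   # with boxes scaled to 0-1000 by the box-normalisation helper above.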
| 359
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'swin2sr'
SCREAMING_SNAKE_CASE = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __snake_case=64 , __snake_case=1 , __snake_case=3 , __snake_case=180 , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=8 , __snake_case=2.0 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=False , __snake_case=0.02 , __snake_case=1e-5 , __snake_case=2 , __snake_case=1.0 , __snake_case="1conv" , __snake_case="pixelshuffle" , **__snake_case , ) -> Dict:
'''simple docstring'''
super().__init__(**__snake_case )
__a =image_size
__a =patch_size
__a =num_channels
__a =embed_dim
__a =depths
__a =len(__snake_case )
__a =num_heads
__a =window_size
__a =mlp_ratio
__a =qkv_bias
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =drop_path_rate
__a =hidden_act
__a =use_absolute_embeddings
__a =layer_norm_eps
__a =initializer_range
__a =upscale
__a =img_range
__a =resi_connection
__a =upsampler
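# Sketch, assuming the upstream class name Swin2SRConfig: the attribute_map above
# lets generic code read canonical names and get the model-specific fields, e.g.
#
#   config = Swin2SRConfig(embed_dim=180)
#   config.hidden_size  # -> 180, resolved through attribute_map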
| 308
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCAmelCase : str = logging.get_logger(__name__)
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : Any ):
"""simple docstring"""
__a =b.T
__a =np.sum(np.square(_snake_case ) , axis=1 )
__a =np.sum(np.square(_snake_case ) , axis=0 )
__a =np.matmul(_snake_case , _snake_case )
__a =aa[:, None] - 2 * ab + ba[None, :]
return d
def UpperCamelCase_( _snake_case : str , _snake_case : Union[str, Any] ):
"""simple docstring"""
__a =x.reshape(-1 , 3 )
__a =squared_euclidean_distance(_snake_case , _snake_case )
return np.argmin(_snake_case , axis=1 )
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = ['pixel_values']
def __init__( self , __snake_case = None , __snake_case = True , __snake_case = None , __snake_case = PILImageResampling.BILINEAR , __snake_case = True , __snake_case = True , **__snake_case , ) -> None:
'''simple docstring'''
super().__init__(**__snake_case )
__a =size if size is not None else {'height': 256, 'width': 256}
__a =get_size_dict(__snake_case )
__a =np.array(__snake_case ) if clusters is not None else None
__a =do_resize
__a =size
__a =resample
__a =do_normalize
__a =do_color_quantize
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = PILImageResampling.BILINEAR , __snake_case = None , **__snake_case , ) -> np.ndarray:
'''simple docstring'''
__a =get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
__snake_case , size=(size['height'], size['width']) , resample=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case = None , ) -> np.ndarray:
'''simple docstring'''
__a =rescale(image=__snake_case , scale=1 / 127.5 , data_format=__snake_case )
__a =image - 1
return image
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = ChannelDimension.FIRST , **__snake_case , ) -> PIL.Image.Image:
'''simple docstring'''
__a =do_resize if do_resize is not None else self.do_resize
__a =size if size is not None else self.size
__a =get_size_dict(__snake_case )
__a =resample if resample is not None else self.resample
__a =do_normalize if do_normalize is not None else self.do_normalize
__a =do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__a =clusters if clusters is not None else self.clusters
__a =np.array(__snake_case )
__a =make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
__a =[to_numpy_array(__snake_case ) for image in images]
if do_resize:
__a =[self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images]
if do_normalize:
__a =[self.normalize(image=__snake_case ) for image in images]
if do_color_quantize:
__a =[to_channel_dimension_format(__snake_case , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__a =np.array(__snake_case )
__a =color_quantize(__snake_case , __snake_case ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__a =images.shape[0]
__a =images.reshape(__snake_case , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__a =list(__snake_case )
else:
__a =[to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
__a ={'input_ids': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
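# Self-contained sketch of the colour-quantisation math above with toy data
# (the variable names here are illustrative, not from the original file):
import numpy as np

pixels = np.array([[0.0, 0.0, 0.0], [250.0, 250.0, 250.0]])      # two RGB pixels
centres = np.array([[10.0, 10.0, 10.0], [240.0, 240.0, 240.0]])  # two cluster centres
d = ((pixels[:, None, :] - centres[None, :, :]) ** 2).sum(axis=-1)  # pairwise squared distances
assert list(d.argmin(axis=1)) == [0, 1]  # each pixel maps to its nearest centre's index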
| 360
|
import os
def UpperCamelCase_( _snake_case : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_snake_case ) , _snake_case ) ) as input_file:
__a =[
[int(_snake_case ) for element in line.split(',' )]
for line in input_file.readlines()
]
__a =len(_snake_case )
__a =len(matrix[0] )
__a =[[-1 for _ in range(_snake_case )] for _ in range(_snake_case )]
for i in range(_snake_case ):
__a =matrix[i][0]
for j in range(1 , _snake_case ):
for i in range(_snake_case ):
__a =minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _snake_case ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
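# Worked example for the left-to-right minimal path sum above (Project Euler 82
# style movement: right, up or down): for [[1, 9], [2, 3]] the cheapest crossing
# starts in the second row, 2 -> 3 = 5, which is exactly what the three
# relaxation passes per column compute.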
| 308
| 0
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> Optional[Any]:
'''simple docstring'''
__a =()
for resnet, attn in zip(self.resnets , self.attentions ):
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case=True ) -> Optional[int]:
'''simple docstring'''
__a =()
for resnet in self.resnets:
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__a =[]
for _ in range(self.num_layers ):
__a =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
__a =attentions
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[str]:
'''simple docstring'''
__a =self.resnets[0](__snake_case , __snake_case )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
return hidden_states
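# Reading sketch: these classes appear to mirror diffusers' Flax UNet blocks,
# with digit-mangled names (FlaxTransformeraDModel ~ FlaxTransformer2DModel,
# jnp.floataa ~ jnp.float32). Down blocks collect one hidden state per resnet
# (plus one after the optional downsampler) into output_states; the matching up
# blocks pop residuals off res_hidden_states_tuple and concatenate them on the
# channel axis before each resnet, which is why each up-block resnet is built
# with in_channels = resnet_in_channels + res_skip_channels.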
| 361
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_lowerCAmelCase : Any = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , *__snake_case , **__snake_case ) -> None:
'''simple docstring'''
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 308
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowerCAmelCase : str = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_lowerCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
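# Sketch of the lazy-import pattern used here: at runtime the module is replaced
# by a _LazyModule that only imports the torch/flax submodules on first attribute
# access, e.g.
#
#   from transformers.models.gpt_neo import GPTNeoConfig   # cheap, config only
#   from transformers.models.gpt_neo import GPTNeoModel    # triggers the torch import
#
# Upstream, the dict assigned at the top and the two lists below it all populate
# a single `_import_structure` mapping.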
| 362
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
| 0
|
from PIL import Image
def UpperCamelCase_( _snake_case : Image , _snake_case : float ):
"""simple docstring"""
def brightness(_snake_case : int ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(_snake_case )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
_lowerCAmelCase : str = change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png")
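# Arithmetic check as a sketch: brightness is a pure shift, since
# 128 + level + (c - 128) == c + level; with level=100 a pixel value of 50 maps
# to 150. PIL's Image.point() calls the function once per possible input value
# to build a lookup table, then applies it to every band of the image.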
| 363
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'yolos'
def __init__( self , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=[512, 864] , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=100 , __snake_case=True , __snake_case=False , __snake_case=1 , __snake_case=5 , __snake_case=2 , __snake_case=5 , __snake_case=2 , __snake_case=0.1 , **__snake_case , ) -> str:
'''simple docstring'''
super().__init__(**__snake_case )
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =intermediate_size
__a =hidden_act
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =initializer_range
__a =layer_norm_eps
__a =image_size
__a =patch_size
__a =num_channels
__a =qkv_bias
__a =num_detection_tokens
__a =use_mid_position_embeddings
__a =auxiliary_loss
# Hungarian matcher
__a =class_cost
__a =bbox_cost
__a =giou_cost
# Loss coefficients
__a =bbox_loss_coefficient
__a =giou_loss_coefficient
__a =eos_coefficient
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = version.parse('1.11' )
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __magic_name__ ( self ) -> float:
'''simple docstring'''
return 1e-4
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
return 12
| 308
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'mgp-str'
def __init__( self , __snake_case=[32, 128] , __snake_case=4 , __snake_case=3 , __snake_case=27 , __snake_case=38 , __snake_case=5_0257 , __snake_case=3_0522 , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=4.0 , __snake_case=True , __snake_case=False , __snake_case=1e-5 , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0 , __snake_case=False , __snake_case=0.02 , **__snake_case , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**__snake_case )
__a =image_size
__a =patch_size
__a =num_channels
__a =max_token_length
__a =num_character_labels
__a =num_bpe_labels
__a =num_wordpiece_labels
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =mlp_ratio
__a =distilled
__a =layer_norm_eps
__a =drop_rate
__a =qkv_bias
__a =attn_drop_rate
__a =drop_path_rate
__a =output_aa_attentions
__a =initializer_range
| 364
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def UpperCamelCase_( _snake_case : Optional[Any] ):
"""simple docstring"""
__a =model.config
__a =DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
__a =MBartConfig(
is_decoder=_snake_case , is_encoder_decoder=_snake_case , add_cross_attention=_snake_case , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=_snake_case , add_final_layer_norm=_snake_case , )
return encoder_config, decoder_config
def UpperCamelCase_( _snake_case : Tuple ):
"""simple docstring"""
if "encoder.model" in name:
__a =name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
__a =name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
__a =name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__a =name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
__a ='encoder.' + name
if "attn.proj" in name:
__a =name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
__a =name.replace('attn' , 'attention.self' )
if "norm1" in name:
__a =name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__a =name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__a =name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__a =name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
__a ='encoder.layernorm.weight'
if name == "encoder.norm.bias":
__a ='encoder.layernorm.bias'
return name
def UpperCamelCase_( _snake_case : Tuple , _snake_case : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__a =orig_state_dict.pop(_snake_case )
if "qkv" in key:
__a =key.split('.' )
__a =int(key_split[3] )
__a =int(key_split[5] )
__a =model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__a =val[:dim, :]
__a =val[dim : dim * 2, :]
__a =val[-dim:, :]
else:
__a =val[:dim]
__a =val[dim : dim * 2]
__a =val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
__a =val
return orig_state_dict
def UpperCamelCase_( _snake_case : Tuple , _snake_case : Union[str, Any]=None , _snake_case : List[Any]=False ):
"""simple docstring"""
__a =DonutModel.from_pretrained(_snake_case ).eval()
# load HuggingFace model
__a , __a =get_configs(_snake_case )
__a =DonutSwinModel(_snake_case )
__a =MBartForCausalLM(_snake_case )
__a =VisionEncoderDecoderModel(encoder=_snake_case , decoder=_snake_case )
model.eval()
__a =original_model.state_dict()
__a =convert_state_dict(_snake_case , _snake_case )
model.load_state_dict(_snake_case )
# verify results on scanned document
__a =load_dataset('hf-internal-testing/example-documents' )
__a =dataset['test'][0]['image'].convert('RGB' )
__a =XLMRobertaTokenizerFast.from_pretrained(_snake_case , from_slow=_snake_case )
__a =DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
__a =DonutProcessor(_snake_case , _snake_case )
__a =processor(_snake_case , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
__a ='<s_docvqa><s_question>{user_input}</s_question><s_answer>'
__a ='When is the coffee break?'
__a =task_prompt.replace('{user_input}' , _snake_case )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
__a ='<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
__a ='<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
__a ='<s_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
__a ='<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
__a ='hello world'
else:
raise ValueError('Model name not supported' )
__a =original_model.decoder.tokenizer(_snake_case , add_special_tokens=_snake_case , return_tensors='pt' )[
'input_ids'
]
__a =original_model.encoder.model.patch_embed(_snake_case )
__a , __a =model.encoder.embeddings(_snake_case )
assert torch.allclose(_snake_case , _snake_case , atol=1e-3 )
# verify encoder hidden states
__a =original_model.encoder(_snake_case )
__a =model.encoder(_snake_case ).last_hidden_state
assert torch.allclose(_snake_case , _snake_case , atol=1e-2 )
# verify decoder hidden states
__a =original_model(_snake_case , _snake_case , _snake_case ).logits
__a =model(_snake_case , decoder_input_ids=_snake_case ).logits
assert torch.allclose(_snake_case , _snake_case , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(_snake_case )
processor.save_pretrained(_snake_case )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
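# Typical invocation sketch (the script filename and local paths below are
# assumptions for illustration):
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-docvqa \
#       --push_to_hub
#
# Before saving, the script sanity-checks the converted weights by comparing
# patch embeddings, encoder hidden states and decoder logits against the
# original model with torch.allclose.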
| 308
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE = 'CLIPImageProcessor'
SCREAMING_SNAKE_CASE = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __snake_case=None , __snake_case=None , **__snake_case ) -> Optional[int]:
'''simple docstring'''
__a =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __snake_case , )
__a =kwargs.pop('feature_extractor' )
__a =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__snake_case , __snake_case )
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case ) -> Tuple:
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be None.' )
if text is not None:
__a =self.tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case )
if images is not None:
__a =self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is not None and images is not None:
__a =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__snake_case ) , tensor_type=__snake_case )
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> List[str]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> Any:
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.tokenizer.model_input_names
__a =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __snake_case , )
return self.image_processor_class
@property
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __snake_case , )
return self.image_processor
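# Minimal usage sketch, assuming the upstream class name CLIPProcessor:
#
#   processor = CLIPProcessor(image_processor=image_processor, tokenizer=tokenizer)
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> a BatchEncoding with input_ids/attention_mask from the tokenizer and
#   #    pixel_values merged in from the image processor, as in __call__ above.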
| 365
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
SCREAMING_SNAKE_CASE = False
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
__a =['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
__a =dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__a =['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
__a ={'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
def __magic_name__ ( self , **__snake_case ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def __magic_name__ ( self , __snake_case ) -> List[Any]:
'''simple docstring'''
__a ='adapt act apte'
__a ='adapt act apte'
return input_text, output_text
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a ='adapt act apte'
__a =['adapt', 'act', 'ap@@', 'te']
__a =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =[tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__a =[0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
__a ='I am a small frog.'
__a =tok([src_text] , padding=__snake_case , truncation=__snake_case )['input_ids']
__a =tok.batch_decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
__a ='I am a small frog .'
__a ='.'
__a =tok(__snake_case )['input_ids']
__a =tok(__snake_case )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 308
| 0
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def UpperCamelCase_( _snake_case : Union[str, Any]=32 , _snake_case : Any=10 , _snake_case : Optional[Any]=100 , _snake_case : List[Any]=1026 , _snake_case : List[str]=True , _snake_case : str="data/tokenized_stories_train_wikitext103.jbl" , _snake_case : List[str]="igf_context_pairs.jbl" , ):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
__a , __a =generate_datasets(
_snake_case , _snake_case , number=_snake_case , min_len=1026 , trim=_snake_case )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
__a =torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# load pretrained model
__a =load_gpta('gpt2' ).to(_snake_case )
print('computing perplexity on objective set' )
__a =compute_perplexity(_snake_case , _snake_case , _snake_case ).item()
print('perplexity on objective set:' , _snake_case )
# collect igf pairs and save to file demo.jbl
collect_objective_set(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def UpperCamelCase_( _snake_case : Optional[Any] , _snake_case : str=15 , _snake_case : Dict=128 , _snake_case : Union[str, Any]=100 , _snake_case : Dict="igf_model.pt" , ):
"""simple docstring"""
set_seed(42 )
# Load pre-trained model
__a =GPTaLMHeadModel.from_pretrained('gpt2' )
# Initialize secondary learner to use embedding weights of model
__a =SecondaryLearner(_snake_case )
# Train secondary learner
__a =train_secondary_learner(
_snake_case , _snake_case , max_epochs=_snake_case , batch_size=_snake_case , eval_freq=100 , igf_model_path=_snake_case , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def UpperCamelCase_( _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : List[str]=32 , _snake_case : int=1000 , _snake_case : Union[str, Any]=16 , _snake_case : str=1.0 , _snake_case : Union[str, Any]=recopy_gpta , _snake_case : Union[str, Any]=None , _snake_case : int=10 , _snake_case : Any="gpt2_finetuned.pt" , ):
"""simple docstring"""
__a =torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
__a =RandomSampler(_snake_case )
__a =DataLoader(_snake_case , sampler=_snake_case )
__a =max_steps // (len(_snake_case )) + 1
__a =0
__a =torch.zeros((1, context_len) , dtype=torch.long , device=_snake_case )
__a , __a , __a =recopy_model(_snake_case , _snake_case , _snake_case )
model.train()
if secondary_learner is not None:
secondary_learner.to(_snake_case )
secondary_learner.eval()
__a =[]
__a =0
__a =[]
__a =[]
# Compute the performance of the transformer model at the beginning
__a =compute_perplexity(_snake_case , _snake_case , _snake_case )
test_perps.append(_snake_case )
print('Test perplexity, step' , _snake_case , ':' , _snake_case )
for epoch in range(int(_snake_case ) ):
for step, example in enumerate(_snake_case ):
torch.cuda.empty_cache()
__a =random.randint(0 , example.size(2 ) - context_len - 1 )
__a =example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
__a =model(_snake_case , labels=_snake_case )
__a =True
if secondary_learner is not None:
__a =secondary_learner.forward(
torch.tensor(_snake_case , dtype=torch.long , device=_snake_case ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(_snake_case ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
__a =-1
if predicted_q < threshold:
__a =False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
__a =outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
__a =0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
__a =compute_perplexity(_snake_case , _snake_case , _snake_case )
test_perps.append(_snake_case )
print('Test perplexity, step' , _snake_case , ':' , _snake_case )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , _snake_case )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def UpperCamelCase_( ):
"""simple docstring"""
__a =argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' )
# Required parameters
parser.add_argument(
'--data_dir' , default=_snake_case , type=_snake_case , required=_snake_case , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=_snake_case , type=_snake_case , required=_snake_case , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=_snake_case , default=_snake_case , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=_snake_case , default=_snake_case , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=_snake_case , type=_snake_case , required=_snake_case , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=_snake_case , type=_snake_case , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=_snake_case , default=_snake_case , help='A seed for reproducible training.' )
parser.add_argument(
'--context_len' , default=32 , type=_snake_case , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=100 , type=_snake_case , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=100 , type=_snake_case , help='secondary model evaluation is triggered at eval_freq' )
parser.add_argument('--max_steps' , default=1000 , type=_snake_case , help='To calculate training epochs' )
parser.add_argument(
'--secondary_learner_batch_size' , default=128 , type=_snake_case , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=_snake_case , help='batch size of training data of language model(gpt2) ' )
parser.add_argument(
'--eval_interval' , default=10 , type=_snake_case , help=(
'decay the selectivity of our secondary learner filter from'
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=100 , type=_snake_case , help='The number of examples split to be used as objective_set/test_data' )
parser.add_argument(
'--min_len' , default=1026 , type=_snake_case , help='The minimum length of the article to be used as objective set' )
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=_snake_case , help='number of epochs to train secondary learner' )
parser.add_argument('--trim' , default=_snake_case , type=_snake_case , help='truncate the example if it exceeds context length' )
parser.add_argument(
'--threshold' , default=1.0 , type=_snake_case , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=_snake_case , help='finetuned_model_name' )
parser.add_argument(
'--recopy_model' , default=_snake_case , type=_snake_case , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=_snake_case , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
__a =joblib.load('data/IGF_values.jbl' )
# Train secondary learner
__a =training_secondary_learner(
_snake_case , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
__a =GPTaLMHeadModel.from_pretrained('gpt2' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
__a , __a =generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=_snake_case )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
_snake_case , _snake_case , _snake_case , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=_snake_case , secondary_learner=_snake_case , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
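# Pipeline summary as a sketch: main() runs three stages --
# (1) collect (context, information-gain) pairs with a frozen GPT-2 against an
#     objective set,
# (2) train a SecondaryLearner to predict IG(X) from a context,
# (3) fine-tune GPT-2 while the secondary learner filters each sampled context
#     through a threshold that is relaxed after 10 batches (the
#     `global_step == 10` branch in the fine-tuning loop above).
# Paths such as data/IGF_values.jbl are hard-coded placeholders carried over
# from the original example.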
| 366
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __magic_name__ ( unittest.TestCase , lowerCAmelCase_ ):
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =load_tool('text-to-speech' )
self.tool.setup()
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
__a =self.tool('hey' )
__a =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
__a =self.tool('hey' )
__a =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
| 308
| 0
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def UpperCamelCase_( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def UpperCamelCase_( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def UpperCamelCase_( ):
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(requests.exceptions.ConnectionError ):
http_head('https://huggingface.co' )
| 367
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> Optional[Any]:
'''simple docstring'''
__a =()
for resnet, attn in zip(self.resnets , self.attentions ):
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case=True ) -> Optional[int]:
'''simple docstring'''
__a =()
for resnet in self.resnets:
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# there is always at least one resnet
__a =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__a =[]
for _ in range(self.num_layers ):
__a =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
__a =attentions
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[str]:
'''simple docstring'''
__a =self.resnets[0](__snake_case , __snake_case )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
return hidden_states
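# A minimal sketch of driving a cross-attention down block like the first
# class above. Class names are collapsed to `__magic_name__` in this dump;
# the upstream diffusers name and import path below are an assumption, and
# the shapes are illustrative only.
#
#   import jax, jax.numpy as jnp
#   from diffusers.models.unet_2d_blocks_flax import FlaxCrossAttnDownBlock2D
#   block = FlaxCrossAttnDownBlock2D(in_channels=32, out_channels=32, num_attention_heads=2)
#   sample = jnp.zeros((1, 16, 16, 32))              # NHWC, as these modules expect
#   temb = jnp.zeros((1, 128))                       # timestep embedding
#   encoder_hidden_states = jnp.zeros((1, 77, 768))  # text conditioning
#   params = block.init(jax.random.PRNGKey(0), sample, temb, encoder_hidden_states)
#   hidden_states, output_states = block.apply(params, sample, temb, encoder_hidden_states)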
| 308
| 0
|
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
_lowerCAmelCase : List[str] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : tuple , _snake_case : Path , _snake_case : Any , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : int=False , ):
"""simple docstring"""
output_path.parent.mkdir(parents=_snake_case , exist_ok=_snake_case )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
_snake_case , _snake_case , f=output_path.as_posix() , input_names=_snake_case , output_names=_snake_case , dynamic_axes=_snake_case , do_constant_folding=_snake_case , use_external_data_format=_snake_case , enable_onnx_checker=_snake_case , opset_version=_snake_case , )
else:
export(
_snake_case , _snake_case , f=output_path.as_posix() , input_names=_snake_case , output_names=_snake_case , dynamic_axes=_snake_case , do_constant_folding=_snake_case , opset_version=_snake_case , )
@torch.no_grad()
def UpperCamelCase_( _snake_case : str , _snake_case : str , _snake_case : int , _snake_case : bool = False ):
"""simple docstring"""
    __a =torch.float16 if fpaa else torch.float32
if fpaa and torch.cuda.is_available():
__a ='cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
__a ='cpu'
__a =StableDiffusionPipeline.from_pretrained(_snake_case , torch_dtype=_snake_case ).to(_snake_case )
__a =Path(_snake_case )
# TEXT ENCODER
__a =pipeline.text_encoder.config.max_position_embeddings
__a =pipeline.text_encoder.config.hidden_size
__a =pipeline.tokenizer(
'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=_snake_case , return_tensors='pt' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=_snake_case , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
} , opset=_snake_case , )
del pipeline.text_encoder
# UNET
__a =pipeline.unet.config.in_channels
__a =pipeline.unet.config.sample_size
__a =output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , _snake_case , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ),
torch.randn(2 ).to(device=_snake_case , dtype=_snake_case ),
torch.randn(2 , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ),
False,
) , output_path=_snake_case , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
} , opset=_snake_case , use_external_data_format=_snake_case , )
__a =str(unet_path.absolute().as_posix() )
__a =os.path.dirname(_snake_case )
__a =onnx.load(_snake_case )
# clean up existing tensor files
shutil.rmtree(_snake_case )
os.mkdir(_snake_case )
# collate external tensor files into one
onnx.save_model(
_snake_case , _snake_case , save_as_external_data=_snake_case , all_tensors_to_one_file=_snake_case , location='weights.pb' , convert_attribute=_snake_case , )
del pipeline.unet
# VAE ENCODER
__a =pipeline.vae
__a =vae_encoder.config.in_channels
__a =vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
    __a =lambda sample , return_dict : vae_encoder.encode(sample , return_dict )[0].sample()
onnx_export(
_snake_case , model_args=(
torch.randn(1 , _snake_case , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ),
False,
) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=_snake_case , )
# VAE DECODER
__a =pipeline.vae
__a =vae_decoder.config.latent_channels
__a =vae_decoder.config.out_channels
# forward only through the decoder part
    __a =vae_decoder.decode
onnx_export(
_snake_case , model_args=(
torch.randn(1 , _snake_case , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=_snake_case , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
__a =pipeline.safety_checker
__a =safety_checker.config.vision_config.num_channels
__a =safety_checker.config.vision_config.image_size
__a =safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , _snake_case , _snake_case , _snake_case , ).to(device=_snake_case , dtype=_snake_case ),
torch.randn(1 , _snake_case , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ),
) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
} , opset=_snake_case , )
del pipeline.safety_checker
__a =OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
__a =pipeline.feature_extractor
else:
__a =None
__a =None
__a =OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=_snake_case , feature_extractor=_snake_case , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(_snake_case )
print('ONNX pipeline saved to' , _snake_case )
del pipeline
del onnx_pipeline
__a =OnnxStableDiffusionPipeline.from_pretrained(_snake_case , provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
_lowerCAmelCase : List[str] = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
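# Example invocation (assuming the upstream script name
# convert_stable_diffusion_checkpoint_to_onnx.py; --fp16 requires a CUDA GPU):
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx --opset 14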
| 368
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> int:
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=__snake_case , )
assert hasattr(self , 'env' )
def __magic_name__ ( self , __snake_case ) -> int:
'''simple docstring'''
__a =f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
__a ={'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__snake_case , instance_count=__snake_case , instance_type=self.instance_type , debugger_hook_config=__snake_case , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__snake_case , py_version='py36' , )
def __magic_name__ ( self , __snake_case ) -> Optional[Any]:
'''simple docstring'''
TrainingJobAnalytics(__snake_case ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def __magic_name__ ( self , __snake_case ) -> Optional[int]:
'''simple docstring'''
# create estimator
__a =self.create_estimator(__snake_case )
# run training
estimator.fit()
# result dataframe
__a =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__a =list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
__a =list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__a =(
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __snake_case )
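# These tests are gated on TEST_SAGEMAKER=True (see the skipif above); a
# typical invocation, assuming the usual transformers test layout, is:
#   TEST_SAGEMAKER=True python -m pytest -s -v ./tests/sagemaker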
| 308
| 0
|
from __future__ import annotations
import time
import numpy as np
_lowerCAmelCase : List[str] = [8, 5, 9, 7]
_lowerCAmelCase : List[str] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_lowerCAmelCase : List[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __magic_name__ :
def __init__( self , __snake_case , __snake_case , __snake_case , ) -> None:
'''simple docstring'''
__a =claim_vector
__a =allocated_resources_table
__a =maximum_claim_table
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __magic_name__ ( self ) -> list[list[int]]:
'''simple docstring'''
return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __magic_name__ ( self ) -> dict[int, list[int]]:
'''simple docstring'''
        return {self.__need().index(i ): i for i in self.__need()}
def __magic_name__ ( self , **__snake_case ) -> None:
'''simple docstring'''
__a =self.__need()
__a =self.__allocated_resources_table
__a =self.__available_resources()
__a =self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
__a =False
for each_need in need_list:
__a =True
for index, need in enumerate(__snake_case ):
if need > available_resources[index]:
__a =False
break
if execution:
__a =True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__a =original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(__snake_case )
# update available/freed resources stack
__a =np.array(__snake_case ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
                        + ' '.join([str(x ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
                f'P{self.__allocated_resources_table.index(item ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
                f'P{self.__maximum_claim_table.index(item ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
            + ' '.join(str(x ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
            + ' '.join(str(x ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
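# A minimal sketch of driving the class above. Its name and methods are
# collapsed in this dump; upstream it is `BankersAlgorithm` with a
# `main(**kwargs)` entry point (an assumption here), fed the three
# module-level tables (claim vector, allocated resources, maximum claims):
#   BankersAlgorithm(claim_vector, allocated_table, maximum_table).main(describe=True)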
| 369
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
_lowerCAmelCase : Optional[Any] = "Hello world! cécé herlolip"
_lowerCAmelCase : str = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def UpperCamelCase_( _snake_case : str , _snake_case : List[Any] ):
"""simple docstring"""
__a =BertAbsConfig(
temp_dir='.' , finetune_bert=_snake_case , large=_snake_case , share_emb=_snake_case , use_bert_emb=_snake_case , encoder='bert' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    __a =torch.load(_snake_case , lambda storage , loc : storage )
__a =AbsSummarizer(_snake_case , torch.device('cpu' ) , _snake_case )
original.eval()
__a =BertAbsSummarizer(_snake_case , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
__a =BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
__a =tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
__a =tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__a =encoder_input_ids
__a =decoder_input_ids
__a =__a =None
__a =None
__a =__a =None
__a =__a =None
__a =None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__a =original(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =original.generator(_snake_case )
__a =new_model(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =new_model.generator(_snake_case )
__a =torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('Maximum absolute difference between model outputs: {:.2f}'.format(_snake_case ) )
__a =torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('Maximum absolute difference between generator outputs: {:.2f}'.format(_snake_case ) )
__a =torch.allclose(_snake_case , _snake_case , atol=1e-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
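# Example invocation (assuming the upstream script name
# convert_bertabs_original_pytorch_checkpoint.py):
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path ./bertabs_checkpoint.pt \
#       --pytorch_dump_folder_path ./bertabs-converted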
| 308
| 0
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
SCREAMING_SNAKE_CASE = False
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
__a =['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
__a =dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__a =['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
__a ={'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
def __magic_name__ ( self , **__snake_case ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def __magic_name__ ( self , __snake_case ) -> List[Any]:
'''simple docstring'''
__a ='adapt act apte'
__a ='adapt act apte'
return input_text, output_text
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a ='adapt act apte'
__a =['adapt', 'act', 'ap@@', 'te']
__a =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =[tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__a =[0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
__a ='I am a small frog.'
__a =tok([src_text] , padding=__snake_case , truncation=__snake_case )['input_ids']
__a =tok.batch_decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
__a ='I am a small frog .'
__a ='.'
__a =tok(__snake_case )['input_ids']
__a =tok(__snake_case )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 370
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=False , __snake_case=False , __snake_case=False , __snake_case=2 , __snake_case=99 , __snake_case=0 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=2 , __snake_case=0.02 , __snake_case=2 , __snake_case=4 , __snake_case="last" , __snake_case=True , __snake_case=None , __snake_case=0 , ) -> Optional[Any]:
'''simple docstring'''
__a =parent
__a =batch_size
__a =seq_length
__a =is_training
__a =use_input_lengths
__a =use_token_type_ids
__a =use_labels
__a =gelu_activation
__a =sinusoidal_embeddings
__a =causal
__a =asm
__a =n_langs
__a =vocab_size
__a =n_special
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =max_position_embeddings
__a =type_sequence_label_size
__a =initializer_range
__a =num_labels
__a =num_choices
__a =summary_type
__a =use_proj
__a =scope
__a =bos_token_id
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a =random_attention_mask([self.batch_size, self.seq_length] )
__a =None
if self.use_input_lengths:
__a =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__a =None
if self.use_token_type_ids:
__a =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__a =None
__a =None
__a =None
if self.use_labels:
__a =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a =ids_tensor([self.batch_size] , 2 ).float()
__a =ids_tensor([self.batch_size] , self.num_choices )
__a =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]:
'''simple docstring'''
__a =XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , lengths=__snake_case , langs=__snake_case )
__a =model(__snake_case , langs=__snake_case )
__a =model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[int]:
'''simple docstring'''
__a =XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Dict:
'''simple docstring'''
__a =XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
__a =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]:
'''simple docstring'''
__a =XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((__a) , ) =result_with_labels.to_tuple()
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((__a) , ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[Any]:
'''simple docstring'''
__a =XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Any:
'''simple docstring'''
__a =self.num_labels
__a =XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Tuple:
'''simple docstring'''
__a =self.num_choices
__a =XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
__a ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case=False ) -> str:
'''simple docstring'''
__a =super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
__a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =XLMModelTester(self )
__a =ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1 ) -> Optional[Any]:
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
            [isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
__a =min_length + idx + 1
__a =min_length + idx + 1
__a =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1 ) -> Dict:
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
            [isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
__a =min_length + idx + 1
__a =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a =XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(__snake_case )
__a =torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
__a =[
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__a =model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
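# A standalone sketch of the slow test above (requires network access and the
# xlm-mlm-en-2048 weights; illustrative only, not part of the suite):
#
#   from transformers import XLMTokenizer, XLMWithLMHeadModel
#   tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
#   model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
#   input_ids = tokenizer("the president", return_tensors="pt").input_ids
#   output_ids = model.generate(input_ids, do_sample=False)
#   print(tokenizer.decode(output_ids[0]))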
| 308
| 0
|
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCAmelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self , __snake_case , __snake_case = None , __snake_case = None ) -> Dict:
'''simple docstring'''
super().__init__()
__a =learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__a =torch.zeros(__snake_case , __snake_case )
else:
__a =None
__a =torch.nn.Parameter(__snake_case )
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 4_2
SCREAMING_SNAKE_CASE = 4_2
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Any:
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=__snake_case , transformer=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , scheduler=__snake_case , learned_classifier_free_sampling_embeddings=__snake_case , )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> str:
'''simple docstring'''
__a =len(__snake_case ) if isinstance(__snake_case , __snake_case ) else 1
# get prompt text embeddings
__a =self.tokenizer(
__snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__a =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__a =text_input_ids[:, : self.tokenizer.model_max_length]
__a =self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__a =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__snake_case )
# duplicate text embeddings for each generation per prompt
__a =prompt_embeds.repeat_interleave(__snake_case , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__a =self.learned_classifier_free_sampling_embeddings.embeddings
__a =negative_prompt_embeds.unsqueeze(0 ).repeat(__snake_case , 1 , 1 )
else:
__a =[''] * batch_size
__a =text_input_ids.shape[-1]
__a =self.tokenizer(
__snake_case , padding='max_length' , max_length=__snake_case , truncation=__snake_case , return_tensors='pt' , )
__a =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__a =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__snake_case )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a =negative_prompt_embeds.shape[1]
__a =negative_prompt_embeds.repeat(1 , __snake_case , 1 )
__a =negative_prompt_embeds.view(batch_size * num_images_per_prompt , __snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a =torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , __snake_case , __snake_case = 100 , __snake_case = 5.0 , __snake_case = 1.0 , __snake_case = 1 , __snake_case = None , __snake_case = None , __snake_case = "pil" , __snake_case = True , __snake_case = None , __snake_case = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(__snake_case , __snake_case ):
__a =1
elif isinstance(__snake_case , __snake_case ):
__a =len(__snake_case )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(__snake_case )}' )
__a =batch_size * num_images_per_prompt
__a =guidance_scale > 1.0
__a =self._encode_prompt(__snake_case , __snake_case , __snake_case )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__snake_case , __snake_case ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(__snake_case )}.' )
# get the initial completely masked latents unless the user supplied it
__a =(batch_size, self.transformer.num_latent_pixels)
if latents is None:
__a =self.transformer.num_vector_embeds - 1
__a =torch.full(__snake_case , __snake_case ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
f' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__a =latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__snake_case , device=self.device )
__a =self.scheduler.timesteps.to(self.device )
__a =latents
for i, t in enumerate(self.progress_bar(__snake_case ) ):
# expand the sample if we are doing classifier free guidance
__a =torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__a =self.transformer(__snake_case , encoder_hidden_states=__snake_case , timestep=__snake_case ).sample
if do_classifier_free_guidance:
__a , __a =model_output.chunk(2 )
__a =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__snake_case , dim=1 , keepdim=__snake_case )
__a =self.truncate(__snake_case , __snake_case )
# remove `log(0)`'s (`-inf`s)
__a =model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__a =self.scheduler.step(__snake_case , timestep=__snake_case , sample=__snake_case , generator=__snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__snake_case , __snake_case , __snake_case )
__a =self.vqvae.config.vq_embed_dim
__a =(batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__a =self.vqvae.quantize.get_codebook_entry(__snake_case , shape=__snake_case )
__a =self.vqvae.decode(__snake_case , force_not_quantize=__snake_case ).sample
__a =(image / 2 + 0.5).clamp(0 , 1 )
__a =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a =self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case ) -> torch.FloatTensor:
'''simple docstring'''
__a , __a =torch.sort(__snake_case , 1 , descending=__snake_case )
__a =torch.exp(__snake_case )
__a =sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__a =torch.full_like(keep_mask[:, 0:1, :] , __snake_case )
__a =torch.cat((all_true, keep_mask) , dim=1 )
__a =keep_mask[:, :-1, :]
__a =keep_mask.gather(1 , indices.argsort(1 ) )
__a =log_p_x_0.clone()
__a =-torch.inf # -inf = log(0)
return rv
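# Standalone illustration (guarded so it never runs on import) of the
# truncation scheme in the last method above: keep the smallest set of
# classes whose cumulative probability reaches `truncation_rate`, and send
# everything else to log(0) = -inf. The probabilities below are made up.
if __name__ == "__main__":
    import torch

    log_p_x_0 = torch.log(torch.tensor([[[0.5], [0.3], [0.15], [0.05]]]))  # (batch, classes, pixels)
    truncation_rate = 0.7
    sorted_log_p, indices = torch.sort(log_p_x_0, 1, descending=True)
    keep_mask = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
    # always keep the single most likely class, then map the mask back to the
    # original (unsorted) class order
    all_true = torch.full_like(keep_mask[:, 0:1, :], True)
    keep_mask = torch.cat((all_true, keep_mask), dim=1)[:, :-1, :]
    keep_mask = keep_mask.gather(1, indices.argsort(1))
    truncated = log_p_x_0.clone()
    truncated[~keep_mask] = float("-inf")
    print(truncated.exp())  # tensor([[[0.5000], [0.3000], [0.0000], [0.0000]]])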
| 371
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
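# quick sanity check for the rotation helper above: rotating the x unit
# vector by 90 degrees gives (up to floating point error) the y unit vector
#   >>> rotate(numpy.array([1, 0]), 90)
#   array([6.123234e-17, 1.000000e+00])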
| 308
| 0
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """simple docstring"""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x: float) -> float:
        """simple docstring"""
        return math.sin(10 * x)
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
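# sanity check: the exact arc length of f(x) = x from 0 to 1 is sqrt(2), and
# the approximation above converges to it as `steps` grows
#   >>> line_length(lambda x: x, 0, 1, 1_000)  # ~ sqrt(2) ~ 1.41421356...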
| 350
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
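# With this layout, importing the package stays cheap: the torch-backed
# module above is only imported on first attribute access, e.g.
#   from transformers.models.megatron_bert import MegatronBertModel  # resolved lazily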
| 308
| 0
|
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """simple docstring"""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
def solution(limit: int = 10000) -> int:
    """simple docstring"""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
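# quick check on the classic amicable pair (220, 284) using the helper above:
#   >>> sum_of_divisors(220), sum_of_divisors(284)
#   (284, 220)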
| 351
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def __magic_name__ ( *__snake_case , **__snake_case ) -> List[str]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : Image ):
"""simple docstring"""
    m = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> Dict:
'''simple docstring'''
__a =DepthEstimationPipeline(model=__snake_case , image_processor=__snake_case )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __magic_name__ ( self , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
__a =depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , __snake_case )
import datasets
__a =datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
__a =depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , __snake_case , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
@require_torch
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a ='Intel/dpt-large'
__a =pipeline('depth-estimation' , model=__snake_case )
__a =depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
__a =hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
        # It is highly irregular to have no small tests.
        self.skipTest('There is no hf-internal-testing tiny model for either GLPN or DPT' )
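# For reference, a hedged sketch of how the pipeline exercised above is driven
# outside the test harness. 'Intel/dpt-large' is the checkpoint from the slow
# test; the output filename is a placeholder.
from transformers import pipeline
depth_estimator = pipeline('depth-estimation', model='Intel/dpt-large')
outputs = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg')
outputs['depth'].save('depth_map.png')  # PIL.Image visualization of the depth map
print(outputs['predicted_depth'].shape)  # raw torch.Tensor of predicted depths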
| 308
| 0
|
from __future__ import annotations
def UpperCamelCase_( _snake_case : int = 4 ):
"""simple docstring"""
__a =abs(_snake_case ) or 4
return [[1 + x + y * row_size for x in range(_snake_case )] for y in range(_snake_case )]
def UpperCamelCase_( _snake_case : list[list[int]] ):
"""simple docstring"""
return reverse_row(transpose(_snake_case ) )
# OR.. transpose(reverse_column(matrix))
def UpperCamelCase_( _snake_case : list[list[int]] ):
"""simple docstring"""
return reverse_row(reverse_column(_snake_case ) )
# OR.. reverse_column(reverse_row(matrix))
def UpperCamelCase_( _snake_case : list[list[int]] ):
"""simple docstring"""
return reverse_column(transpose(_snake_case ) )
# OR.. transpose(reverse_row(matrix))
def UpperCamelCase_( _snake_case : list[list[int]] ):
"""simple docstring"""
__a =[list(_snake_case ) for x in zip(*_snake_case )]
return matrix
def UpperCamelCase_( _snake_case : list[list[int]] ):
"""simple docstring"""
__a =matrix[::-1]
return matrix
def UpperCamelCase_( _snake_case : list[list[int]] ):
"""simple docstring"""
__a =[x[::-1] for x in matrix]
return matrix
def UpperCamelCase_( _snake_case : list[list[int]] ):
"""simple docstring"""
for i in matrix:
print(*_snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Any = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 90 counterclockwise:\n")
print_matrix(rotate_aa(matrix))
_lowerCAmelCase : Union[str, Any] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 180:\n")
print_matrix(rotate_aaa(matrix))
_lowerCAmelCase : Any = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 270 counterclockwise:\n")
print_matrix(rotate_aaa(matrix))
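# A small sanity check of the identities the helpers above rely on: rotating a
# matrix 90 degrees counterclockwise is transpose followed by row reversal, and
# 180 degrees is reversing both rows and columns. The helper name and the 2x2
# sample are illustrative.
def transpose_demo(m: list) -> list:
    return [list(row) for row in zip(*m)]
sample = [[1, 2], [3, 4]]
assert transpose_demo(sample)[::-1] == [[2, 4], [1, 3]]  # reverse_row(transpose(m))
assert [row[::-1] for row in sample][::-1] == [[4, 3], [2, 1]]  # 180 degrees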
| 352
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
_lowerCAmelCase : Any = "pytorch_model.bin"
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The name of the task to train on.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='no' , metadata={
            'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
        default=1_0_0 , metadata={'help': 'Maximum number of self-training iterations.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Random seed for initialization.'} , )
def UpperCamelCase_( _snake_case : int , _snake_case : str , _snake_case : Optional[int] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : List[Any] ):
"""simple docstring"""
__a =datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
        __a =dataset.filter(lambda example : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__a =int(eval_result * len(_snake_case ) )
print(_snake_case )
__a =dataset.sort('probability' , reverse=_snake_case )
__a =dataset.select(range(_snake_case ) )
__a =dataset.remove_columns(['label', 'probability'] )
__a =dataset.rename_column('prediction' , 'label' )
    __a =dataset.map(lambda example : {"label": idalabel[example["label"]]} )
__a =dataset.shuffle(seed=args.seed )
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(_snake_case , index=_snake_case )
else:
dataset.to_json(_snake_case )
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : str , _snake_case : int , _snake_case : Optional[int] , **_snake_case : List[str] ):
"""simple docstring"""
__a =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__a =STModelArguments(model_name_or_path=_snake_case )
__a =STDataArguments(train_file=_snake_case , infer_file=_snake_case )
__a =STTrainingArguments(output_dir=_snake_case )
__a =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_snake_case ).items():
setattr(_snake_case , _snake_case , _snake_case )
for key, value in kwargs.items():
if hasattr(_snake_case , _snake_case ):
setattr(_snake_case , _snake_case , _snake_case )
# Sanity checks
__a ={}
__a =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__a =args.train_file
__a =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__a =args.eval_file
for key in data_files:
__a =data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
__a =extension
else:
            assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
__a =F'{args.output_dir}/self-train_iter-{{}}'.format
__a =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_snake_case )
os.makedirs(_snake_case , exist_ok=_snake_case )
accelerator.wait_for_everyone()
__a =None
__a =None
__a =0
__a =False
# Show the progress bar
__a =tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__a =data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__a =os.path.join(_snake_case , 'stage-1' )
__a ={
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case , _snake_case ):
arguments_dict.update({key: value} )
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , _snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__a =os.path.join(_snake_case , 'best-checkpoint' )
__a =os.path.join(_snake_case , 'stage-2' )
# Update arguments_dict
__a =model_path
__a =data_files['train']
__a =current_output_dir
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , _snake_case )
__a =iteration
__a =data_dir_format(iteration + 1 )
__a =AutoConfig.from_pretrained(os.path.join(_snake_case , 'best-checkpoint' ) )
__a =config.idalabel
__a =os.path.join(_snake_case , 'eval_results_best-checkpoint.json' )
__a =os.path.join(_snake_case , 'test_results_best-checkpoint.json' )
assert os.path.exists(_snake_case )
with open(_snake_case , 'r' ) as f:
__a =float(json.load(_snake_case )[args.eval_metric] )
__a =os.path.join(_snake_case , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
__a =load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
__a =load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(_snake_case , exist_ok=_snake_case )
shutil.copy(_snake_case , os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case , os.path.join(_snake_case , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
accelerator.wait_for_everyone()
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__a =eval_result
if best_iteration is None:
__a =new_iteration
__a =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__a =new_iteration
__a =new_eval_result
__a =0
else:
if new_eval_result == best_eval_result:
__a =new_iteration
__a =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__a =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , _snake_case )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
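# A toy run of the confidence filter performed by create_pseudo_labeled_data
# above: keep only predictions whose probability clears the threshold. The
# threshold and the three toy rows are illustrative.
from datasets import Dataset
pseudo = Dataset.from_dict({'prediction': [0, 1, 1], 'probability': [0.55, 0.92, 0.97]})
confident = pseudo.filter(lambda example: example['probability'] > 0.9)
assert confident['prediction'] == [1, 1]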
| 308
| 0
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
_lowerCAmelCase : Any = "pytorch_model.bin"
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The name of the task to train on.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='no' , metadata={
            'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
        default=1_0_0 , metadata={'help': 'Maximum number of self-training iterations.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Random seed for initialization.'} , )
def UpperCamelCase_( _snake_case : int , _snake_case : str , _snake_case : Optional[int] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : List[Any] ):
"""simple docstring"""
__a =datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
        __a =dataset.filter(lambda example : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__a =int(eval_result * len(_snake_case ) )
print(_snake_case )
__a =dataset.sort('probability' , reverse=_snake_case )
__a =dataset.select(range(_snake_case ) )
__a =dataset.remove_columns(['label', 'probability'] )
__a =dataset.rename_column('prediction' , 'label' )
    __a =dataset.map(lambda example : {"label": idalabel[example["label"]]} )
__a =dataset.shuffle(seed=args.seed )
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(_snake_case , index=_snake_case )
else:
dataset.to_json(_snake_case )
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : str , _snake_case : int , _snake_case : Optional[int] , **_snake_case : List[str] ):
"""simple docstring"""
__a =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__a =STModelArguments(model_name_or_path=_snake_case )
__a =STDataArguments(train_file=_snake_case , infer_file=_snake_case )
__a =STTrainingArguments(output_dir=_snake_case )
__a =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_snake_case ).items():
setattr(_snake_case , _snake_case , _snake_case )
for key, value in kwargs.items():
if hasattr(_snake_case , _snake_case ):
setattr(_snake_case , _snake_case , _snake_case )
# Sanity checks
__a ={}
__a =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__a =args.train_file
__a =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__a =args.eval_file
for key in data_files:
__a =data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
__a =extension
else:
            assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
__a =F'{args.output_dir}/self-train_iter-{{}}'.format
__a =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_snake_case )
os.makedirs(_snake_case , exist_ok=_snake_case )
accelerator.wait_for_everyone()
__a =None
__a =None
__a =0
__a =False
# Show the progress bar
__a =tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__a =data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__a =os.path.join(_snake_case , 'stage-1' )
__a ={
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case , _snake_case ):
arguments_dict.update({key: value} )
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , _snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__a =os.path.join(_snake_case , 'best-checkpoint' )
__a =os.path.join(_snake_case , 'stage-2' )
# Update arguments_dict
__a =model_path
__a =data_files['train']
__a =current_output_dir
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , _snake_case )
__a =iteration
__a =data_dir_format(iteration + 1 )
__a =AutoConfig.from_pretrained(os.path.join(_snake_case , 'best-checkpoint' ) )
__a =config.idalabel
__a =os.path.join(_snake_case , 'eval_results_best-checkpoint.json' )
__a =os.path.join(_snake_case , 'test_results_best-checkpoint.json' )
assert os.path.exists(_snake_case )
with open(_snake_case , 'r' ) as f:
__a =float(json.load(_snake_case )[args.eval_metric] )
__a =os.path.join(_snake_case , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
__a =load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
__a =load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(_snake_case , exist_ok=_snake_case )
shutil.copy(_snake_case , os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case , os.path.join(_snake_case , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
accelerator.wait_for_everyone()
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__a =eval_result
if best_iteration is None:
__a =new_iteration
__a =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__a =new_iteration
__a =new_eval_result
__a =0
else:
if new_eval_result == best_eval_result:
__a =new_iteration
__a =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__a =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , _snake_case )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
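# The early-stopping bookkeeping at the end of each self-training iteration,
# reduced to plain Python: reset the patience counter on improvement beyond the
# threshold, otherwise count strikes until patience runs out. The score
# history, patience, and threshold values are illustrative.
eval_history = [0.71, 0.74, 0.74, 0.73, 0.74]
patience, threshold = 2, 0.0
best, counter, stopped_at = None, 0, None
for step, score in enumerate(eval_history):
    if best is None or score - best > threshold:
        best, counter = score, 0
    else:
        counter += 1
        if counter >= patience:
            stopped_at = step
            break
assert best == 0.74 and stopped_at == 3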
| 353
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
_lowerCAmelCase : List[Any] = 256_047
_lowerCAmelCase : Dict = 256_145
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = NllbTokenizer
SCREAMING_SNAKE_CASE = NllbTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = {}
def __magic_name__ ( self ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
__a =tokenizer.tokenize('This is a test' )
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__a =tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__a =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_seqaseq:
return
__a =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
__a =[
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
__a =[
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , tgt_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__a =tokenizer.prepare_seqaseq_batch(
__snake_case , tgt_texts=__snake_case , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , __snake_case )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =[AddedToken('<special>' , lstrip=__snake_case )]
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_r.encode('Hey this is a <special> token' )
__a =tokenizer_r.encode('<special>' , add_special_tokens=__snake_case )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
__a =self.tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_p.encode('Hey this is a <special> token' )
__a =tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = 'facebook/nllb-200-distilled-600M'
SCREAMING_SNAKE_CASE = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def __magic_name__ ( cls ) -> Tuple:
'''simple docstring'''
__a =NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
__a =1
return cls
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_6057 )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
self.assertIn(__snake_case , self.tokenizer.all_special_ids )
# fmt: off
__a =[RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__a =self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
__a =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , __snake_case )
__a =10
__a =self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __snake_case )
self.assertEqual(len(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_6203, 3] )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =tempfile.mkdtemp()
__a =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
__a =NllbTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__a =shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__a =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(__snake_case , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors='pt' )
__a =self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors='pt' )
__a =targets['input_ids']
__a =shift_tokens_right(
__snake_case , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(__snake_case ) , {
                # eng_Latn, A, test, EOS
'input_ids': [[25_6047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
                # fra_Latn
'forced_bos_token_id': 25_6057,
} , )
@require_torch
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =True
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
__a =False
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
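# How the tokenizer under test is typically driven for translation; the
# checkpoint and language codes come from the integration test above. With the
# default (non-legacy) behaviour the source ids start with the eng_Latn code
# and end with </s>.
from transformers import NllbTokenizer
nllb_tokenizer = NllbTokenizer.from_pretrained(
    'facebook/nllb-200-distilled-600M', src_lang='eng_Latn', tgt_lang='ron_Latn'
)
nllb_batch = nllb_tokenizer(
    'UN Chief says there is no military solution in Syria',
    text_target='Şeful ONU declară că nu există o soluţie militară în Siria',
    return_tensors='pt',
)
print(nllb_batch.input_ids)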
| 308
| 0
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_lowerCAmelCase : Optional[Any] = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
_lowerCAmelCase : Optional[Any] = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
_lowerCAmelCase : List[Any] = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
_lowerCAmelCase : Tuple = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
_lowerCAmelCase : int = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
_lowerCAmelCase : Optional[int] = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
_lowerCAmelCase : Optional[int] = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def UpperCamelCase_( ):
"""simple docstring"""
__a , __a =randrange(len(_snake_case ) ), randrange(len(_snake_case ) )
__a =['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
__a , __a =SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def UpperCamelCase_( _snake_case : int = 100 ):
"""simple docstring"""
return (generate_random_hand() for _ in range(_snake_case ))
@pytest.mark.parametrize('hand, expected' , _snake_case )
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : Union[str, Any] ):
"""simple docstring"""
assert PokerHand(_snake_case )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , _snake_case )
def UpperCamelCase_( _snake_case : Any , _snake_case : int ):
"""simple docstring"""
assert PokerHand(_snake_case )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , _snake_case )
def UpperCamelCase_( _snake_case : int , _snake_case : List[str] , _snake_case : Any ):
"""simple docstring"""
__a =PokerHand(_snake_case )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , _snake_case )
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : str ):
"""simple docstring"""
assert PokerHand(_snake_case )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , _snake_case )
def UpperCamelCase_( _snake_case : List[str] , _snake_case : Tuple ):
"""simple docstring"""
assert PokerHand(_snake_case )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , _snake_case )
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : List[str] , _snake_case : str ):
"""simple docstring"""
assert PokerHand(_snake_case ).compare_with(PokerHand(_snake_case ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : List[str] , _snake_case : Optional[Any] ):
"""simple docstring"""
assert PokerHand(_snake_case ).compare_with(PokerHand(_snake_case ) ) == expected
def UpperCamelCase_( ):
"""simple docstring"""
__a =[PokerHand(_snake_case ) for hand in SORTED_HANDS]
__a =poker_hands.copy()
shuffle(_snake_case )
__a =chain(sorted(_snake_case ) )
for index, hand in enumerate(_snake_case ):
assert hand == poker_hands[index]
def UpperCamelCase_( ):
"""simple docstring"""
__a =[PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=_snake_case )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def UpperCamelCase_( ):
"""simple docstring"""
__a =PokerHand('2C 4S AS 3D 5C' )
__a =True
__a =[5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def UpperCamelCase_( ):
"""simple docstring"""
__a =0
__a =os.path.abspath(os.path.dirname(_snake_case ) )
__a =os.path.join(_snake_case , 'poker_hands.txt' )
with open(_snake_case ) as file_hand:
for line in file_hand:
__a =line[:14].strip()
__a =line[15:].strip()
__a , __a =PokerHand(_snake_case ), PokerHand(_snake_case )
__a =player.compare_with(_snake_case )
if output == "Win":
answer += 1
assert answer == 376
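# A hedged usage sketch of the class under test; PokerHand lives in the local
# sola module (imported relatively by the tests), so this assumes that module
# is importable from the working directory. The two hands and the expected
# outcome are the first row of the result fixtures above.
from sola import PokerHand  # local module; the tests import it as .sola
hand, other = PokerHand('2H 3H 4H 5H 6H'), PokerHand('KS AS TS QS JS')
assert hand.compare_with(other) == 'Loss'  # straight flush loses to royal flush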
| 354
|
def UpperCamelCase_( _snake_case : str , _snake_case : int ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(_snake_case ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
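# The helper above slices character-level n-grams with a sliding window; two
# illustrative calls (the function is restated here with a conventional name):
def char_ngrams(sentence: str, ngram_size: int) -> list:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
assert char_ngrams('abcde', 2) == ['ab', 'bc', 'cd', 'de']
assert char_ngrams('abcde', 5) == ['abcde']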
| 308
| 0
|
from __future__ import annotations
from random import choice
def UpperCamelCase_( _snake_case : Any ):
"""simple docstring"""
return choice(_snake_case )
def UpperCamelCase_( _snake_case : list[int] , _snake_case : int ):
"""simple docstring"""
__a =random_pivot(_snake_case )
# partition based on pivot
# linear time
__a =[e for e in lst if e < pivot]
__a =[e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(_snake_case ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(_snake_case ) < k - 1:
return kth_number(_snake_case , k - len(_snake_case ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(_snake_case , _snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
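# The quickselect above, restated with conventional names to make the recursion
# visible: k is 1-indexed, and because elements equal to the pivot (other than
# the pivot itself) are dropped, inputs should hold distinct values. The sample
# list is illustrative.
from random import choice
def kth_smallest(lst: list, k: int) -> int:
    pivot = choice(lst)
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    if len(small) == k - 1:
        return pivot
    if len(small) < k - 1:
        return kth_smallest(big, k - len(small) - 1)
    return kth_smallest(small, k)
assert kth_smallest([9, 2, 7, 4, 1], 3) == 4  # third smallest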
| 355
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __magic_name__ ( pl.LightningModule ):
def __init__( self , __snake_case ) -> List[Any]:
'''simple docstring'''
super().__init__()
__a =model
__a =2
__a =nn.Linear(self.model.config.hidden_size , self.num_labels )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : str , _snake_case : str , _snake_case : str ):
"""simple docstring"""
__a =LongformerModel.from_pretrained(_snake_case )
__a =LightningModel(_snake_case )
__a =torch.load(_snake_case , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
__a =LongformerForQuestionAnswering.from_pretrained(_snake_case )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_snake_case )
print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
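# An illustrative invocation of the conversion script above; the script
# filename and every path are placeholders, while the flags are exactly the
# ones its argument parser defines.
import subprocess
subprocess.run(
    [
        'python', 'convert_longformer_qa.py',
        '--longformer_model', 'longformer-base-4096',
        '--longformer_question_answering_ckpt_path', 'checkpoints/qa.ckpt',
        '--pytorch_dump_folder_path', 'converted/longformer-qa',
    ],
    check=True,
)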
| 308
| 0
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_lowerCAmelCase : Optional[Any] = "bert-base-cased"
_lowerCAmelCase : Optional[Any] = "google/pegasus-xsum"
_lowerCAmelCase : Tuple = [" Sam ate lunch today.", "Sams lunch ingredients."]
_lowerCAmelCase : List[Any] = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
_lowerCAmelCase : Optional[Any] = "patrickvonplaten/t5-tiny-random"
_lowerCAmelCase : List[Any] = "sshleifer/bart-tiny-random"
_lowerCAmelCase : List[str] = "sshleifer/tiny-mbart"
_lowerCAmelCase : Dict = "sshleifer/tiny-marian-en-de"
def UpperCamelCase_( _snake_case : Path , _snake_case : list ):
"""simple docstring"""
__a ='\n'.join(_snake_case )
Path(_snake_case ).open('w' ).writelines(_snake_case )
def UpperCamelCase_( _snake_case : List[str] ):
"""simple docstring"""
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(_snake_case , F'{split}.source' ) , _snake_case )
_dump_articles(os.path.join(_snake_case , F'{split}.target' ) , _snake_case )
return tmp_dir
class __magic_name__ ( lowerCAmelCase_ ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __magic_name__ ( self , __snake_case ) -> Dict:
'''simple docstring'''
__a =AutoTokenizer.from_pretrained(__snake_case )
__a =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__a =max(len(tokenizer.encode(__snake_case ) ) for a in ARTICLES )
__a =max(len(tokenizer.encode(__snake_case ) ) for a in SUMMARIES )
__a =4
__a =8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__a , __a ='ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
__a =SeqaSeqDataset(
__snake_case , data_dir=__snake_case , type_path='train' , max_source_length=__snake_case , max_target_length=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , )
__a =DataLoader(__snake_case , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__snake_case , __snake_case )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__a =shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __magic_name__ ( self , __snake_case ) -> int:
'''simple docstring'''
__a =AutoTokenizer.from_pretrained(__snake_case )
__a =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__a =max(len(tokenizer.encode(__snake_case ) ) for a in ARTICLES )
__a =max(len(tokenizer.encode(__snake_case ) ) for a in SUMMARIES )
__a =4
__a =LegacySeqaSeqDataset(
__snake_case , data_dir=__snake_case , type_path='train' , max_source_length=20 , max_target_length=__snake_case , )
__a =DataLoader(__snake_case , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
__a =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__a =tmp_dir.joinpath('train.source' ).open().readlines()
__a =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__snake_case , __snake_case , 128 , __snake_case )
__a ={x.name for x in tmp_dir.iterdir()}
__a ={x.name for x in save_dir.iterdir()}
__a =save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__snake_case ) < len(__snake_case )
assert len(__snake_case ) == 1
assert len(packed_examples[0] ) == sum(len(__snake_case ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
__a , __a , __a =self._get_dataset(max_len=64 )
__a =64
__a =ds.make_dynamic_sampler(__snake_case , required_batch_size_multiple=__snake_case )
__a =[len(__snake_case ) for x in batch_sampler]
assert len(set(__snake_case ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__snake_case ) == len(__snake_case ) # no dropped or added examples
__a =DataLoader(__snake_case , batch_sampler=__snake_case , collate_fn=ds.collate_fn , num_workers=2 )
__a =[]
__a =[]
for batch in data_loader:
__a =batch['input_ids'].shape
__a =src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            __a =np.prod(batch['input_ids'].shape )
num_src_per_batch.append(__snake_case )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__snake_case )
assert num_src_per_batch[0] == max(__snake_case )
if failures:
raise AssertionError(f'too many tokens in {len(__snake_case )} batches' )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a , __a , __a =self._get_dataset(max_len=512 )
__a =2
__a =ds.make_sortish_sampler(__snake_case , shuffle=__snake_case )
__a =DataLoader(__snake_case , batch_size=__snake_case , collate_fn=ds.collate_fn , num_workers=2 )
__a =DataLoader(__snake_case , batch_size=__snake_case , collate_fn=ds.collate_fn , num_workers=2 , sampler=__snake_case )
__a =tokenizer.pad_token_id
def count_pad_tokens(__snake_case , __snake_case="input_ids" ):
return [batch[k].eq(__snake_case ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__snake_case , k='labels' ) ) < sum(count_pad_tokens(__snake_case , k='labels' ) )
assert sum(count_pad_tokens(__snake_case ) ) < sum(count_pad_tokens(__snake_case ) )
assert len(__snake_case ) == len(__snake_case )
def __magic_name__ ( self , __snake_case=1000 , __snake_case=128 ) -> Optional[Any]:
'''simple docstring'''
if os.getenv('USE_REAL_DATA' , __snake_case ):
__a ='examples/seq2seq/wmt_en_ro'
__a =max_len * 2 * 64
if not Path(__snake_case ).joinpath('train.len' ).exists():
save_len_file(__snake_case , __snake_case )
else:
__a ='examples/seq2seq/test_data/wmt_en_ro'
__a =max_len * 4
save_len_file(__snake_case , __snake_case )
__a =AutoTokenizer.from_pretrained(__snake_case )
__a =SeqaSeqDataset(
__snake_case , data_dir=__snake_case , type_path='train' , max_source_length=__snake_case , max_target_length=__snake_case , n_obs=__snake_case , )
return ds, max_tokens, tokenizer
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a , __a , __a =self._get_dataset()
__a =set(DistributedSortishSampler(__snake_case , 256 , num_replicas=2 , rank=0 , add_extra_examples=__snake_case ) )
__a =set(DistributedSortishSampler(__snake_case , 256 , num_replicas=2 , rank=1 , add_extra_examples=__snake_case ) )
assert idsa.intersection(__snake_case ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __magic_name__ ( self , __snake_case ) -> Any:
'''simple docstring'''
__a =AutoTokenizer.from_pretrained(__snake_case , use_fast=__snake_case )
if tok_name == MBART_TINY:
__a =SeqaSeqDataset(
__snake_case , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
__a =train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__a =SeqaSeqDataset(
__snake_case , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
__a =train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__snake_case ) == 1 if tok_name == BART_TINY else len(__snake_case ) == 0
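# Illustrative sketch (not part of the test suite above) of the idea behind the
# "sortish" sampler exercised earlier: shuffle globally, then sort within large
# chunks so neighbouring indices have similar lengths and per-batch padding
# (the pad-token counts asserted above) shrinks. The chunk size of 50 batches
# is an assumption for illustration, not the library's exact constant.
import random as _random

def _sortish_indices(lengths, batch_size):
    indices = list(range(len(lengths)))
    _random.shuffle(indices)
    chunk = batch_size * 50
    chunks = [indices[i : i + chunk] for i in range(0, len(indices), chunk)]
    return [i for c in chunks for i in sorted(c, key=lambda idx: lengths[idx], reverse=True)]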
| 356
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
SCREAMING_SNAKE_CASE = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
SCREAMING_SNAKE_CASE = field(
default=1_0_2_4 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = field(default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the test data.'} )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
__a =self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__a =self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
SCREAMING_SNAKE_CASE = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def UpperCamelCase_( ):
"""simple docstring"""
__a =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a , __a , __a =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a , __a , __a =parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
__a =training_args.get_process_log_level()
logger.setLevel(_snake_case )
datasets.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__a =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
    # For CSV/JSON files, this script will use the `statement` column for the input statement and the `table_text` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__a =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__a ={'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__a =data_args.train_file.split('.' )[-1]
__a =data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__a =data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
__a =load_dataset('csv' , data_files=_snake_case , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__a =load_dataset('json' , data_files=_snake_case , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__a =raw_datasets['train'].features['label'].names
__a =len(_snake_case )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__a =TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_snake_case , )
__a =BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__a ='max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__a =False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__a ={'Refused': 0, 'Entailed': 1}
__a ={0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__a =min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_snake_case : Tuple ):
# Tokenize the texts
def _convert_table_text_to_pandas(_snake_case : Optional[Any] ):
__a =[_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
__a =pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__a =examples['statement']
__a =list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
__a =tokenizer(_snake_case , _snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case )
__a =examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
__a =raw_datasets.map(
_snake_case , batched=_snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__a =raw_datasets['train']
if data_args.max_train_samples is not None:
__a =train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__a =raw_datasets['validation']
if data_args.max_eval_samples is not None:
__a =eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
__a =raw_datasets['test']
if data_args.max_predict_samples is not None:
__a =predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_snake_case ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_snake_case : EvalPrediction ):
__a =p.predictions[0] if isinstance(p.predictions , _snake_case ) else p.predictions
__a =np.argmax(_snake_case , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__a =default_data_collator
elif training_args.fpaa:
__a =DataCollatorWithPadding(_snake_case , pad_to_multiple_of=8 )
else:
__a =None
# Initialize our Trainer
__a =Trainer(
model=_snake_case , args=_snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_snake_case , tokenizer=_snake_case , data_collator=_snake_case , )
# Training
if training_args.do_train:
__a =None
if training_args.resume_from_checkpoint is not None:
__a =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a =last_checkpoint
__a =trainer.train(resume_from_checkpoint=_snake_case )
__a =train_result.metrics
__a =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(_snake_case )
)
__a =min(_snake_case , len(_snake_case ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , _snake_case )
trainer.save_metrics('train' , _snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__a =trainer.evaluate(eval_dataset=_snake_case )
__a =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_snake_case )
__a =min(_snake_case , len(_snake_case ) )
trainer.log_metrics('eval' , _snake_case )
trainer.save_metrics('eval' , _snake_case )
if training_args.do_predict:
logger.info('*** Predict ***' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
__a =predict_dataset.remove_columns('label' )
__a =trainer.predict(_snake_case , metric_key_prefix='predict' ).predictions
__a =np.argmax(_snake_case , axis=1 )
__a =os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(_snake_case , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(_snake_case ):
__a =label_list[item]
writer.write(F'{index}\t{item}\n' )
__a ={'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
def UpperCamelCase_( _snake_case : Union[str, Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
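# Standalone sketch (made-up data, not from the TabFact dataset) of the
# `table_text` format handled by _convert_table_text_to_pandas above: rows are
# newline-separated, cells are '#'-separated, and the first row becomes the
# DataFrame header.
def _demo_table_text_to_pandas():
    table_text = "name#score\nalice#3\nbob#5"
    table_content = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(table_content[1:], columns=table_content[0])  # 2 rows x 2 cols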
| 308
| 0
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __magic_name__ ( tf.keras.layers.Layer ):
def __init__( self , __snake_case , __snake_case , __snake_case = None , __snake_case = None ) -> Optional[int]:
'''simple docstring'''
super().__init__()
__a =pad_token_id
__a =max_length
__a =vocab
__a =merges
__a =BytePairTokenizer(__snake_case , __snake_case , sequence_length=__snake_case )
@classmethod
def __magic_name__ ( cls , __snake_case , *__snake_case , **__snake_case ) -> str:
'''simple docstring'''
__a =[' '.join(__snake_case ) for m in tokenizer.bpe_ranks.keys()]
__a =tokenizer.get_vocab()
return cls(__snake_case , __snake_case , *__snake_case , **__snake_case )
@classmethod
def __magic_name__ ( cls , __snake_case , *__snake_case , **__snake_case ) -> str:
'''simple docstring'''
__a =GPTaTokenizer.from_pretrained(__snake_case , *__snake_case , **__snake_case )
return cls.from_tokenizer(__snake_case , *__snake_case , **__snake_case )
@classmethod
def __magic_name__ ( cls , __snake_case ) -> List[str]:
'''simple docstring'''
return cls(**__snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> int:
'''simple docstring'''
__a =self.tf_tokenizer(__snake_case )
__a =tf.ones_like(__snake_case )
if self.pad_token_id is not None:
# pad the tokens up to max length
__a =max_length if max_length is not None else self.max_length
if max_length is not None:
__a , __a =pad_model_inputs(
__snake_case , max_seq_length=__snake_case , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 357
|
from __future__ import annotations
import time
import numpy as np
_lowerCAmelCase : List[str] = [8, 5, 9, 7]
_lowerCAmelCase : List[str] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_lowerCAmelCase : List[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __magic_name__ :
def __init__( self , __snake_case , __snake_case , __snake_case , ) -> None:
'''simple docstring'''
__a =claim_vector
__a =allocated_resources_table
__a =maximum_claim_table
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __magic_name__ ( self ) -> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__snake_case ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __magic_name__ ( self ) -> dict[int, list[int]]:
'''simple docstring'''
return {self.__need().index(__snake_case ): i for i in self.__need()}
def __magic_name__ ( self , **__snake_case ) -> None:
'''simple docstring'''
__a =self.__need()
__a =self.__allocated_resources_table
__a =self.__available_resources()
__a =self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
__a =False
for each_need in need_list:
__a =True
for index, need in enumerate(__snake_case ):
if need > available_resources[index]:
__a =False
break
if execution:
__a =True
                    # get the original index of the process from the need-index manager
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__a =original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(__snake_case )
# update available/freed resources stack
__a =np.array(__snake_case ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(__snake_case ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(__snake_case ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(__snake_case ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
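# Self-contained sketch (usable with the example tables defined at the top of
# this module) of the safety test the class implements: a process may run only
# when its remaining need fits in the available pool, and running it releases
# its allocation back into that pool.
def _demo_safety_order(claim, alloc, max_claim):
    available = np.array(claim) - np.array(alloc).sum(axis=0)
    need = np.array(max_claim) - np.array(alloc)
    pending, order = list(range(len(alloc))), []
    progressed = True
    while pending and progressed:
        progressed = False
        for p in list(pending):
            if (need[p] <= available).all():
                available = available + np.array(alloc[p])  # free its resources
                order.append(p)
                pending.remove(p)
                progressed = True
    return order if not pending else None  # None signals an unsafe state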
| 308
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self , __snake_case , __snake_case=7 , __snake_case=3 , __snake_case=18 , __snake_case=30 , __snake_case=400 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=None , __snake_case=True , ) -> Optional[Any]:
'''simple docstring'''
__a =size if size is not None else {'shortest_edge': 20}
__a =crop_size if crop_size is not None else {'height': 18, 'width': 18}
__a =parent
__a =batch_size
__a =num_channels
__a =image_size
__a =min_resolution
__a =max_resolution
__a =do_resize
__a =size
__a =do_center_crop
__a =crop_size
__a =do_flip_channel_order
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = MobileViTImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =MobileViTImageProcessingTester(self )
@property
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , 'do_resize' ) )
self.assertTrue(hasattr(__snake_case , 'size' ) )
self.assertTrue(hasattr(__snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(__snake_case , 'center_crop' ) )
self.assertTrue(hasattr(__snake_case , 'do_flip_channel_order' ) )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
__a =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
__a =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__a =image_processing(__snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
__a =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__a =image_processing(__snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
__a =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__a =image_processing(__snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 358
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_lowerCAmelCase : Tuple = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
_lowerCAmelCase : Optional[int] = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
_lowerCAmelCase : Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a ={letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def UpperCamelCase_( _snake_case : tuple ):
"""simple docstring"""
return x[0]
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_letter_count(_snake_case )
__a ={
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(_snake_case )
__a ={}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_snake_case )
__a =''.join(freq_to_letter[freq] )
__a =list(freq_to_letter_str.items() )
freq_pairs.sort(key=_snake_case , reverse=_snake_case )
__a =[freq_pair[1] for freq_pair in freq_pairs]
return "".join(_snake_case )
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_frequency_order(_snake_case )
__a =0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
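# Worked illustration of the scoring idea above (a simplified letter order that
# ignores the module's ETAOIN-based tie-breaking): ordinary English prose tends
# to score close to the maximum of 12, while ciphertext scores much lower.
def _demo_match_score(message: str) -> int:
    counts = {letter: 0 for letter in LETTERS}
    for ch in message.upper():
        if ch in LETTERS:
            counts[ch] += 1
    order = "".join(sorted(LETTERS, key=lambda c: counts[c], reverse=True))
    return sum(c in order[:6] for c in ETAOIN[:6]) + sum(c in order[-6:] for c in ETAOIN[-6:])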
| 308
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
__a =tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !
__a =model(__snake_case )['last_hidden_state']
__a =tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , __snake_case )
# compare the actual values for a slice.
__a =tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 359
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'swin2sr'
SCREAMING_SNAKE_CASE = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __snake_case=64 , __snake_case=1 , __snake_case=3 , __snake_case=180 , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=8 , __snake_case=2.0 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=False , __snake_case=0.02 , __snake_case=1e-5 , __snake_case=2 , __snake_case=1.0 , __snake_case="1conv" , __snake_case="pixelshuffle" , **__snake_case , ) -> Dict:
'''simple docstring'''
super().__init__(**__snake_case )
__a =image_size
__a =patch_size
__a =num_channels
__a =embed_dim
__a =depths
__a =len(__snake_case )
__a =num_heads
__a =window_size
__a =mlp_ratio
__a =qkv_bias
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =drop_path_rate
__a =hidden_act
__a =use_absolute_embeddings
__a =layer_norm_eps
__a =initializer_range
__a =upscale
__a =img_range
__a =resi_connection
__a =upsampler
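# Hedged usage sketch, assuming the class above is the one published in
# transformers as Swin2SRConfig: the attribute_map lets generic names used
# elsewhere in the library resolve transparently to Swin2SR-specific fields.
def _demo_attribute_map():
    from transformers import Swin2SRConfig  # assumption: the released class name

    cfg = Swin2SRConfig(embed_dim=96, depths=[6, 6, 6, 6])
    assert cfg.hidden_size == 96                     # 'hidden_size' -> 'embed_dim'
    assert cfg.num_hidden_layers == len(cfg.depths)  # 'num_hidden_layers' -> 'num_layers'
    return cfg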
| 308
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self , __snake_case , __snake_case=7 , __snake_case=3 , __snake_case=30 , __snake_case=400 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=[0.5, 0.5, 0.5] , __snake_case=[0.5, 0.5, 0.5] , __snake_case=True , __snake_case=1 / 255 , __snake_case=True , ) -> List[Any]:
'''simple docstring'''
__a =size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
__a =parent
__a =batch_size
__a =num_channels
__a =min_resolution
__a =max_resolution
__a =do_resize
__a =size
__a =do_normalize
__a =image_mean
__a =image_std
__a =do_rescale
__a =rescale_factor
__a =do_pad
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __magic_name__ ( self , __snake_case , __snake_case=False ) -> List[str]:
'''simple docstring'''
if not batched:
__a =image_inputs[0]
if isinstance(__snake_case , Image.Image ):
__a , __a =image.size
else:
__a , __a =image.shape[1], image.shape[2]
if w < h:
__a =int(self.size['shortest_edge'] * h / w )
__a =self.size['shortest_edge']
elif w > h:
__a =self.size['shortest_edge']
__a =int(self.size['shortest_edge'] * w / h )
else:
__a =self.size['shortest_edge']
__a =self.size['shortest_edge']
else:
__a =[]
for image in image_inputs:
__a , __a =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a =max(__snake_case , key=lambda __snake_case : item[0] )[0]
__a =max(__snake_case , key=lambda __snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = DetaImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =DetaImageProcessingTester(self )
@property
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , 'image_mean' ) )
self.assertTrue(hasattr(__snake_case , 'image_std' ) )
self.assertTrue(hasattr(__snake_case , 'do_normalize' ) )
self.assertTrue(hasattr(__snake_case , 'do_resize' ) )
self.assertTrue(hasattr(__snake_case , 'do_rescale' ) )
self.assertTrue(hasattr(__snake_case , 'do_pad' ) )
self.assertTrue(hasattr(__snake_case , 'size' ) )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , __snake_case )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
__a =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__a , __a =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
__a =image_processing(__snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
__a =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__a , __a =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a =image_processing(__snake_case , return_tensors='pt' ).pixel_values
__a , __a =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
__a =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__a , __a =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a =image_processing(__snake_case , return_tensors='pt' ).pixel_values
__a , __a =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__a =json.loads(f.read() )
__a ={'image_id': 3_9769, 'annotations': target}
# encode them
__a =DetaImageProcessor()
__a =image_processing(images=__snake_case , annotations=__snake_case , return_tensors='pt' )
# verify pixel values
__a =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __snake_case )
__a =torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __snake_case , atol=1e-4 ) )
# verify area
__a =torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __snake_case ) )
# verify boxes
__a =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __snake_case )
__a =torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __snake_case , atol=1e-3 ) )
# verify image_id
__a =torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __snake_case ) )
# verify is_crowd
__a =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __snake_case ) )
# verify class_labels
__a =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __snake_case ) )
# verify orig_size
__a =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __snake_case ) )
# verify size
__a =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __snake_case ) )
@slow
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__a =json.loads(f.read() )
__a ={'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
__a =pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__a =DetaImageProcessor(format='coco_panoptic' )
__a =image_processing(images=__snake_case , annotations=__snake_case , masks_path=__snake_case , return_tensors='pt' )
# verify pixel values
__a =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __snake_case )
__a =torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __snake_case , atol=1e-4 ) )
# verify area
__a =torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __snake_case ) )
# verify boxes
__a =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __snake_case )
__a =torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __snake_case , atol=1e-3 ) )
# verify image_id
__a =torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __snake_case ) )
# verify is_crowd
__a =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __snake_case ) )
# verify class_labels
__a =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __snake_case ) )
# verify masks
__a =82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __snake_case )
# verify orig_size
__a =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __snake_case ) )
# verify size
__a =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __snake_case ) )
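# Standalone sketch of the aspect-preserving resize mirrored by
# get_expected_values above: the shorter side is scaled to
# size['shortest_edge'] and the longer side follows proportionally.
def _demo_shortest_edge(width: int, height: int, shortest_edge: int = 18):
    if width < height:
        return shortest_edge, int(shortest_edge * height / width)
    if width > height:
        return int(shortest_edge * width / height), shortest_edge
    return shortest_edge, shortest_edge  # square input keeps its ratio

# e.g. _demo_shortest_edge(30, 400) -> (18, 240), matching the tester's math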
| 360
|
import os
def UpperCamelCase_( _snake_case : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_snake_case ) , _snake_case ) ) as input_file:
__a =[
[int(_snake_case ) for element in line.split(',' )]
for line in input_file.readlines()
]
__a =len(_snake_case )
__a =len(matrix[0] )
__a =[[-1 for _ in range(_snake_case )] for _ in range(_snake_case )]
for i in range(_snake_case ):
__a =matrix[i][0]
for j in range(1 , _snake_case ):
for i in range(_snake_case ):
__a =minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _snake_case ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
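# Worked micro-example of the column sweep above, on the top-left 3x3 corner of
# the classic Project Euler 82 matrix. Each new column is first reached from
# the left, then relaxed top-down and bottom-up so vertical detours are allowed
# before the next rightward step; here the best path is 201 + 96 + 342 = 639.
def _demo_min_path_sum(matrix):
    rows, cols = len(matrix), len(matrix[0])
    best = [row[0] for row in matrix]  # cost of entering each row in column 0
    for j in range(1, cols):
        best = [best[i] + matrix[i][j] for i in range(rows)]  # step right
        for i in range(1, rows):                              # relax downward moves
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):                     # relax upward moves
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)

assert _demo_min_path_sum([[131, 673, 234], [201, 96, 342], [630, 803, 746]]) == 639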
| 308
| 0
|
# using DFS to find an Eulerian path/circuit traversal
def UpperCamelCase_( _snake_case : int , _snake_case : Tuple , _snake_case : Any , _snake_case : Optional[int]=None ):
"""simple docstring"""
__a =(path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
__a , __a =True, True
__a =dfs(_snake_case , _snake_case , _snake_case , _snake_case )
return path
def UpperCamelCase_( _snake_case : int , _snake_case : Optional[Any] ):
"""simple docstring"""
__a =0
__a =-1
for i in range(_snake_case ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
__a =i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : Tuple ):
"""simple docstring"""
__a =[[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
__a , __a =check_circuit_or_path(_snake_case , _snake_case )
if check == 3:
print('graph is not Eulerian' )
print('no path' )
return
__a =1
if check == 2:
__a =odd_node
print('graph has a Euler path' )
if check == 1:
print('graph has a Euler cycle' )
__a =dfs(_snake_case , _snake_case , _snake_case )
print(_snake_case )
def UpperCamelCase_( ):
"""simple docstring"""
__a ={1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
__a ={1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
__a ={1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
__a ={1: [2, 3], 2: [1, 3], 3: [1, 2]}
__a ={
1: [],
2: []
        # all degrees are zero
}
__a =10
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
if __name__ == "__main__":
main()
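# The parity rule the checker above relies on: in a connected undirected graph,
# an Euler cycle exists iff every vertex has even degree, and an Euler path
# (that is not a cycle) exists iff exactly two vertices have odd degree.
def _count_odd_degree(graph: dict) -> int:
    return sum(len(neighbors) % 2 for neighbors in graph.values())

# _count_odd_degree({1: [2, 3], 2: [1, 3], 3: [1, 2]}) == 0                          -> Euler cycle (check == 1)
# _count_odd_degree({1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}) == 2    -> Euler path (check == 2)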
| 361
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_lowerCAmelCase : Any = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , *__snake_case , **__snake_case ) -> None:
'''simple docstring'''
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 308
| 0
|
import requests
from bs4 import BeautifulSoup
def UpperCamelCase_( _snake_case : str , _snake_case : dict ):
"""simple docstring"""
__a =BeautifulSoup(requests.get(_snake_case , params=_snake_case ).content , 'html.parser' )
__a =soup.find('div' , attrs={'class': 'gs_ri'} )
__a =div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
return anchors[2].get_text()
if __name__ == "__main__":
_lowerCAmelCase : Tuple = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2_018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 362
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
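# Sketch of what the lazy pattern above buys: importing the package stays cheap
# because the heavy submodule loads only on first attribute access. A minimal
# PEP 562-style analogue (illustrative only; the real mechanics live inside
# transformers' _LazyModule, and binding a function like this as module-level
# __getattr__ is what PEP 562 enables):
def _demo_lazy_getattr(name):
    if name == "BartphoTokenizer":
        import importlib

        return importlib.import_module(".tokenization_bartpho", __name__).BartphoTokenizer
    raise AttributeError(name)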
| 308
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase_( _snake_case : List[str] ):
"""simple docstring"""
if isinstance(_snake_case , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_snake_case , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_snake_case ):
return [[videos]]
raise ValueError(F'Could not make batched video from {videos}' )
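# Illustration of the batching rule above with placeholder numpy "frames":
# a bare image becomes a one-video batch of one frame, a flat list becomes a
# single video, and an already-nested list passes through untouched.
def _demo_make_batched():
    frame = np.zeros((224, 224, 3), dtype=np.uint8)      # one fake RGB frame
    single = UpperCamelCase_(frame)                      # -> [[frame]]
    one_video = UpperCamelCase_([frame, frame])          # -> [[frame, frame]]
    batch = UpperCamelCase_([[frame], [frame]])          # -> unchanged
    return len(single), len(one_video), len(batch)       # (1, 1, 2)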
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = ['pixel_values']
def __init__( self , __snake_case = True , __snake_case = None , __snake_case = PILImageResampling.BILINEAR , __snake_case = True , __snake_case = None , __snake_case = True , __snake_case = 1 / 255 , __snake_case = True , __snake_case = None , __snake_case = None , **__snake_case , ) -> None:
'''simple docstring'''
super().__init__(**__snake_case )
__a =size if size is not None else {'shortest_edge': 224}
__a =get_size_dict(__snake_case , default_to_square=__snake_case )
__a =crop_size if crop_size is not None else {'height': 224, 'width': 224}
__a =get_size_dict(__snake_case , param_name='crop_size' )
__a =do_resize
__a =size
__a =do_center_crop
__a =crop_size
__a =resample
__a =do_rescale
__a =rescale_factor
__a =do_normalize
__a =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__a =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = PILImageResampling.BILINEAR , __snake_case = None , **__snake_case , ) -> np.ndarray:
'''simple docstring'''
__a =get_size_dict(__snake_case , default_to_square=__snake_case )
if "shortest_edge" in size:
__a =get_resize_output_image_size(__snake_case , size['shortest_edge'] , default_to_square=__snake_case )
elif "height" in size and "width" in size:
__a =(size['height'], size['width'])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = None , **__snake_case , ) -> np.ndarray:
'''simple docstring'''
__a =get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(__snake_case , size=(size['height'], size['width']) , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = None , **__snake_case , ) -> str:
'''simple docstring'''
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case = None , **__snake_case , ) -> np.ndarray:
'''simple docstring'''
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__a =to_numpy_array(__snake_case )
if do_resize:
__a =self.resize(image=__snake_case , size=__snake_case , resample=__snake_case )
if do_center_crop:
__a =self.center_crop(__snake_case , size=__snake_case )
if do_rescale:
__a =self.rescale(image=__snake_case , scale=__snake_case )
if do_normalize:
__a =self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case )
__a =to_channel_dimension_format(__snake_case , __snake_case )
return image
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = ChannelDimension.FIRST , **__snake_case , ) -> PIL.Image.Image:
'''simple docstring'''
__a =do_resize if do_resize is not None else self.do_resize
__a =resample if resample is not None else self.resample
__a =do_center_crop if do_center_crop is not None else self.do_center_crop
__a =do_rescale if do_rescale is not None else self.do_rescale
__a =rescale_factor if rescale_factor is not None else self.rescale_factor
__a =do_normalize if do_normalize is not None else self.do_normalize
__a =image_mean if image_mean is not None else self.image_mean
__a =image_std if image_std is not None else self.image_std
__a =size if size is not None else self.size
__a =get_size_dict(__snake_case , default_to_square=__snake_case )
__a =crop_size if crop_size is not None else self.crop_size
__a =get_size_dict(__snake_case , param_name='crop_size' )
if not valid_images(__snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
__a =make_batched(__snake_case )
__a =[
[
self._preprocess_image(
image=__snake_case , do_resize=__snake_case , size=__snake_case , resample=__snake_case , do_center_crop=__snake_case , crop_size=__snake_case , do_rescale=__snake_case , rescale_factor=__snake_case , do_normalize=__snake_case , image_mean=__snake_case , image_std=__snake_case , data_format=__snake_case , )
for img in video
]
for video in videos
]
__a ={'pixel_values': videos}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
| 363
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'yolos'
def __init__( self , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=[512, 864] , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=100 , __snake_case=True , __snake_case=False , __snake_case=1 , __snake_case=5 , __snake_case=2 , __snake_case=5 , __snake_case=2 , __snake_case=0.1 , **__snake_case , ) -> str:
'''simple docstring'''
super().__init__(**__snake_case )
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =intermediate_size
__a =hidden_act
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =initializer_range
__a =layer_norm_eps
__a =image_size
__a =patch_size
__a =num_channels
__a =qkv_bias
__a =num_detection_tokens
__a =use_mid_position_embeddings
__a =auxiliary_loss
# Hungarian matcher
__a =class_cost
__a =bbox_cost
__a =giou_cost
# Loss coefficients
__a =bbox_loss_coefficient
__a =giou_loss_coefficient
__a =eos_coefficient
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = version.parse('1.11' )
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __magic_name__ ( self ) -> float:
'''simple docstring'''
return 1e-4
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
return 12
| 308
| 0
|
import heapq
def UpperCamelCase_( _snake_case : dict ):
"""simple docstring"""
__a =[]
    # for each node and its adjacency list, add the node and its rank to the queue
    # using the heapq module, the queue is filled like a priority queue
    # heapq implements a min-priority queue, so -1 * len(v) is used to order by max rank first
for key, value in graph.items():
# O(log(n))
heapq.heappush(_snake_case , [-1 * len(_snake_case ), (key, value)] )
# chosen_vertices = set of chosen vertices
__a =set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
__a =heapq.heappop(_snake_case )[1][0]
chosen_vertices.add(_snake_case )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
__a =elem[1][1].index(_snake_case )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_snake_case )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : int = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
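# Extra sanity check (illustrative, not from the original file): every edge of a
# graph must have at least one endpoint in the returned cover.
if __name__ == "__main__":
    example = {0: [1, 2], 1: [0], 2: [0, 3], 3: [2]}
    cover = greedy_min_vertex_cover(example)
    assert all(u in cover or v in cover for u in example for v in example[u])
    print(f"Cover for the second example: {cover}")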
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    # The boolean flags below were elided in this dump; the values used here are
    # taken from the upstream conversion script.
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
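# Worked example of the renaming rules above (key chosen for illustration):
#   "encoder.model.layers.0.blocks.0.attn.proj.weight"
#   -> "encoder.layers.0.blocks.0.attn.proj.weight"          ("encoder.model" -> "encoder")
#   -> "encoder.encoder.layers.0.blocks.0.attn.proj.weight"  (prefix re-added because "layers" is present)
#   -> "encoder.encoder.layers.0.blocks.0.attention.output.dense.weight"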
def convert_state_dict(orig_state_dict, model):
    # The q/k/v target key names below are reconstructed to match the renaming
    # scheme of rename_key above.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "<s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
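# Example invocation (the script filename is assumed from this converter's
# upstream location; the model id and output path are placeholders):
#
#     python convert_donut_to_pytorch.py \
#         --model_name naver-clova-ix/donut-base \
#         --pytorch_dump_folder_path ./donut-converted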
from math import ceil


def assert_device_map(device_map, num_blocks):
    """Validates that a device_map assigns every attention block exactly once."""
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Evenly distributes `n_layers` layer indices across the given devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
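# Quick usage example for the helpers above: split 12 attention blocks evenly
# across 4 (hypothetical) devices and validate the result.
if __name__ == "__main__":
    device_map = get_device_map(12, list(range(4)))
    # {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
    assert_device_map(device_map, 12)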
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
SCREAMING_SNAKE_CASE = False
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
__a =['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
__a =dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__a =['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
__a ={'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
def __magic_name__ ( self , **__snake_case ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def __magic_name__ ( self , __snake_case ) -> List[Any]:
'''simple docstring'''
__a ='adapt act apte'
__a ='adapt act apte'
return input_text, output_text
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a ='adapt act apte'
__a =['adapt', 'act', 'ap@@', 'te']
__a =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =[tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__a =[0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
__a ='I am a small frog.'
__a =tok([src_text] , padding=__snake_case , truncation=__snake_case )['input_ids']
__a =tok.batch_decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
__a ='I am a small frog .'
__a ='.'
__a =tok(__snake_case )['input_ids']
__a =tok(__snake_case )['input_ids']
assert encoded[-1] == encoded_dot[0]
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = (DPMSolverSDEScheduler,)
SCREAMING_SNAKE_CASE = 1_0
def __magic_name__ ( self , **__snake_case ) -> Union[str, Any]:
'''simple docstring'''
__a ={
'num_train_timesteps': 1100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**__snake_case )
return config
def __magic_name__ ( self ) -> int:
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__snake_case )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__snake_case , beta_end=__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__snake_case )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.scheduler_classes[0]
__a =self.get_scheduler_config()
__a =scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps )
__a =self.dummy_model()
__a =self.dummy_sample_deter * scheduler.init_noise_sigma
__a =sample.to(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
__a =scheduler.scale_model_input(__snake_case , __snake_case )
__a =model(__snake_case , __snake_case )
__a =scheduler.step(__snake_case , __snake_case , __snake_case )
__a =output.prev_sample
__a =torch.sum(torch.abs(__snake_case ) )
__a =torch.mean(torch.abs(__snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.scheduler_classes[0]
__a =self.get_scheduler_config(prediction_type='v_prediction' )
__a =scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps )
__a =self.dummy_model()
__a =self.dummy_sample_deter * scheduler.init_noise_sigma
__a =sample.to(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
__a =scheduler.scale_model_input(__snake_case , __snake_case )
__a =model(__snake_case , __snake_case )
__a =scheduler.step(__snake_case , __snake_case , __snake_case )
__a =output.prev_sample
__a =torch.sum(torch.abs(__snake_case ) )
__a =torch.mean(torch.abs(__snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =self.scheduler_classes[0]
__a =self.get_scheduler_config()
__a =scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps , device=__snake_case )
__a =self.dummy_model()
__a =self.dummy_sample_deter.to(__snake_case ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__a =scheduler.scale_model_input(__snake_case , __snake_case )
__a =model(__snake_case , __snake_case )
__a =scheduler.step(__snake_case , __snake_case , __snake_case )
__a =output.prev_sample
__a =torch.sum(torch.abs(__snake_case ) )
__a =torch.mean(torch.abs(__snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.scheduler_classes[0]
__a =self.get_scheduler_config()
__a =scheduler_class(**__snake_case , use_karras_sigmas=__snake_case )
scheduler.set_timesteps(self.num_inference_steps , device=__snake_case )
__a =self.dummy_model()
__a =self.dummy_sample_deter.to(__snake_case ) * scheduler.init_noise_sigma
__a =sample.to(__snake_case )
for t in scheduler.timesteps:
__a =scheduler.scale_model_input(__snake_case , __snake_case )
__a =model(__snake_case , __snake_case )
__a =scheduler.step(__snake_case , __snake_case , __snake_case )
__a =output.prev_sample
__a =torch.sum(torch.abs(__snake_case ) )
__a =torch.mean(torch.abs(__snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
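# A condensed, self-contained version of the loop the tests above exercise
# (requires torchsde; the zero tensor stands in for a real denoising model):
if __name__ == "__main__":
    sde_scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
    sde_scheduler.set_timesteps(10)
    sde_sample = torch.randn(1, 3, 32, 32) * sde_scheduler.init_noise_sigma
    for timestep in sde_scheduler.timesteps:
        scaled = sde_scheduler.scale_model_input(sde_sample, timestep)
        residual = torch.zeros_like(scaled)  # stand-in for model(scaled, timestep)
        sde_sample = sde_scheduler.step(residual, timestep, sde_sample).prev_sample
    print(float(sde_sample.abs().mean()))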
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __magic_name__ ( unittest.TestCase , lowerCAmelCase_ ):
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =load_tool('text-to-speech' )
self.tool.setup()
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
__a =self.tool('hey' )
__a =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
        __a =self.tool(text='hey' )
__a =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : Any=False ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
__a =os.path.abspath(_snake_case )
logger.info(F'Loading PyTorch weights from {pt_path}' )
__a =torch.load(_snake_case , map_location='cpu' )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
__a =convert_pytorch_state_dict_to_flax(_snake_case , _snake_case )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
__a =convert_pytorch_sharded_state_dict_to_flax(_snake_case , _snake_case )
return flax_state_dict
def UpperCamelCase_( _snake_case : Tuple[str] , _snake_case : np.ndarray , _snake_case : Dict[str, jnp.ndarray] , _snake_case : str , ):
"""simple docstring"""
def is_key_or_prefix_key_in_dict(_snake_case : Tuple[str] ) -> bool:
return len(set(_snake_case ) & {key, (model_prefix,) + key} ) > 0
# layer norm
__a =pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
__a =pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
__a =pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_snake_case ):
return renamed_pt_tuple_key, pt_tensor
# embedding
__a =pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_snake_case ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
__a =pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_snake_case ):
__a =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__a =pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_snake_case ):
__a =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__a =pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__a =pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
__a =None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
__a =pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
__a =pt_tuple_key[-2] + '_v'
if name is not None:
__a =pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
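# Worked example of the renaming above (the function is anonymized in this dump;
# upstream it is `rename_key_and_reshape_tensor`): a 2D PT weight ("dense", "weight")
# with no matching Flax key becomes ("dense", "kernel") with the tensor transposed,
# while a 4D conv weight becomes a "kernel" entry transposed to HWIO layout via
# pt_tensor.transpose(2, 3, 1, 0).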
def UpperCamelCase_( _snake_case : Dict , _snake_case : List[Any] ):
"""simple docstring"""
__a ={k: v.numpy() for k, v in pt_state_dict.items()}
__a =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__a =flax_model.params['params']
else:
__a =flax_model.params
__a =flatten_dict(_snake_case )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__a =flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(_snake_case )
__a ={}
__a =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__a =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__a =tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__a =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__a =pt_tuple_key[1:]
# Correctly rename weight parameters
__a , __a =rename_key_and_reshape_tensor(
_snake_case , _snake_case , _snake_case , _snake_case )
# add model prefix if necessary
__a =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__a =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__a =jnp.asarray(_snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_snake_case , _snake_case )
continue
# also add unexpected weight so that warning is thrown
__a =jnp.asarray(_snake_case )
else:
# also add unexpected weight so that warning is thrown
__a =jnp.asarray(_snake_case )
return unflatten_dict(_snake_case )
def UpperCamelCase_( _snake_case : int , _snake_case : str ):
"""simple docstring"""
import torch
# Load the index
__a ={}
for shard_file in shard_filenames:
# load using msgpack utils
__a =torch.load(_snake_case )
__a ={k: v.numpy() for k, v in pt_state_dict.items()}
__a =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__a =flax_model.params['params']
__a =flatten_dict(_snake_case )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
__a =flax_model.params
__a =flatten_dict(_snake_case )
__a =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__a =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__a =tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__a =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__a =pt_tuple_key[1:]
# Correctly rename weight parameters
__a , __a =rename_key_and_reshape_tensor(
_snake_case , _snake_case , _snake_case , _snake_case )
# add model prefix if necessary
__a =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__a =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__a =jnp.asarray(_snake_case )
continue
if "var" in flax_key[-1]:
__a =jnp.asarray(_snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_snake_case , _snake_case )
continue
# also add unexpected weight so that warning is thrown
__a =jnp.asarray(_snake_case )
else:
# also add unexpected weight so that warning is thrown
__a =jnp.asarray(_snake_case )
return unflatten_dict(_snake_case )
def UpperCamelCase_( _snake_case : Union[str, Any] , _snake_case : Optional[Any] ):
"""simple docstring"""
__a =os.path.abspath(_snake_case )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
__a =getattr(_snake_case , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(_snake_case , 'rb' ) as state_f:
try:
__a =from_bytes(_snake_case , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_snake_case , _snake_case )
def UpperCamelCase_( _snake_case : Tuple , _snake_case : Dict ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    __a =flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, _snake_case)).values()
    if any(__a):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot
        # handle bf16 and bf16 is not fully supported in PT yet.
        logger.warning(
            'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
            'before loading those in PyTorch model.' )
        __a =jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, _snake_case)
__a =flatten_dict(_snake_case )
__a =pt_model.state_dict()
__a =(pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
__a =(pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__a =[]
__a =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__a =flax_key_tuple[0] == pt_model.base_model_prefix
__a ='.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__a =flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__a =(pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_snake_case ) not in pt_model_dict:
# conv layer
__a =flax_key_tuple[:-1] + ('weight',)
__a =jnp.transpose(_snake_case , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_snake_case ) not in pt_model_dict:
# linear layer
__a =flax_key_tuple[:-1] + ('weight',)
__a =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__a =flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__a =flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
__a =flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
__a ='.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__a ='.'.join(_snake_case )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__a ={}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__a =key.split('.' )
__a =None
if key_components[-3::2] == ["parametrizations", "original0"]:
__a =key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
__a =key_components[-2] + '_v'
if name is not None:
__a =key_components[:-3] + [name]
__a ='.'.join(_snake_case )
__a =key
if flax_key in special_pt_names:
__a =special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__a =np.asarray(_snake_case ) if not isinstance(_snake_case , np.ndarray ) else flax_tensor
__a =torch.from_numpy(_snake_case )
# remove from missing keys
missing_keys.remove(_snake_case )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_snake_case )
pt_model.load_state_dict(_snake_case )
# re-transform missing_keys to list
__a =list(_snake_case )
if len(_snake_case ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_snake_case ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'If your task is similar to the task the model of the checkpoint was trained on, '
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
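# A hedged sketch of how these converters are reached through the public API
# (the model id is a placeholder; any checkpoint with PyTorch weights works).
# `from_pt=True` drives the PT->Flax path above, while `from_flax=True` on a
# PyTorch model class drives the Flax->PT path.
if __name__ == "__main__":
    from transformers import FlaxBertModel

    flax_model = FlaxBertModel.from_pretrained("bert-base-cased", from_pt=True)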
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> Optional[Any]:
'''simple docstring'''
__a =()
for resnet, attn in zip(self.resnets , self.attentions ):
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case=True ) -> Optional[int]:
'''simple docstring'''
__a =()
for resnet in self.resnets:
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# there is always at least one resnet
__a =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__a =[]
for _ in range(self.num_layers ):
__a =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
__a =attentions
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[str]:
'''simple docstring'''
__a =self.resnets[0](__snake_case , __snake_case )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
return hidden_states
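# A rough shape-check sketch. The block classes above are anonymized in this
# dump; the second one is assumed to correspond to diffusers' FlaxDownBlock2D
# (the import path may vary across diffusers versions, and all sizes are
# illustrative):
if __name__ == "__main__":
    import jax

    from diffusers.models.unet_2d_blocks_flax import FlaxDownBlock2D

    block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=2)
    params = block.init(
        jax.random.PRNGKey(0),
        jnp.zeros((1, 16, 16, 32)),  # NHWC hidden states
        jnp.zeros((1, 128)),  # timestep embeddings
    )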
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = (UnCLIPScheduler,)
def __magic_name__ ( self , **__snake_case ) -> Dict:
'''simple docstring'''
__a ={
'num_train_timesteps': 1000,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**__snake_case )
return config
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__snake_case )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__snake_case )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__snake_case )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__snake_case )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__snake_case , prev_timestep=__snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.scheduler_classes[0]
__a =self.get_scheduler_config(variance_type='fixed_small_log' )
__a =scheduler_class(**__snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.scheduler_classes[0]
__a =self.get_scheduler_config(variance_type='learned_range' )
__a =scheduler_class(**__snake_case )
__a =0.5
assert scheduler._get_variance(1 , predicted_variance=__snake_case ) - -10.171_2790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=__snake_case ) - -5.799_8052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=__snake_case ) - -0.001_0011 < 1e-5
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.scheduler_classes[0]
__a =self.get_scheduler_config()
__a =scheduler_class(**__snake_case )
__a =scheduler.timesteps
__a =self.dummy_model()
__a =self.dummy_sample_deter
__a =torch.manual_seed(0 )
for i, t in enumerate(__snake_case ):
# 1. predict noise residual
__a =model(__snake_case , __snake_case )
# 2. predict previous mean of sample x_t-1
__a =scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case ).prev_sample
__a =pred_prev_sample
__a =torch.sum(torch.abs(__snake_case ) )
__a =torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.scheduler_classes[0]
__a =self.get_scheduler_config()
__a =scheduler_class(**__snake_case )
scheduler.set_timesteps(25 )
__a =scheduler.timesteps
__a =self.dummy_model()
__a =self.dummy_sample_deter
__a =torch.manual_seed(0 )
for i, t in enumerate(__snake_case ):
# 1. predict noise residual
__a =model(__snake_case , __snake_case )
if i + 1 == timesteps.shape[0]:
__a =None
else:
__a =timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__a =scheduler.step(
__snake_case , __snake_case , __snake_case , prev_timestep=__snake_case , generator=__snake_case ).prev_sample
__a =pred_prev_sample
__a =torch.sum(torch.abs(__snake_case ) )
__a =torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
pass
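# A condensed version of the skipped-timestep loop the second test above drives
# (the zero residual stands in for a real model's output):
if __name__ == "__main__":
    unclip = UnCLIPScheduler(num_train_timesteps=1000)
    unclip.set_timesteps(25)
    sample = torch.randn(1, 3, 32, 32)
    steps = unclip.timesteps
    for i, t in enumerate(steps):
        prev_t = steps[i + 1] if i + 1 < len(steps) else None
        sample = unclip.step(torch.zeros_like(sample), t, sample, prev_timestep=prev_t).prev_sample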
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> int:
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=__snake_case , )
assert hasattr(self , 'env' )
def __magic_name__ ( self , __snake_case ) -> int:
'''simple docstring'''
__a =f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
__a ={'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__snake_case , instance_count=__snake_case , instance_type=self.instance_type , debugger_hook_config=__snake_case , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__snake_case , py_version='py36' , )
def __magic_name__ ( self , __snake_case ) -> Optional[Any]:
'''simple docstring'''
TrainingJobAnalytics(__snake_case ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def __magic_name__ ( self , __snake_case ) -> Optional[int]:
'''simple docstring'''
# create estimator
__a =self.create_estimator(__snake_case )
# run training
estimator.fit()
# result dataframe
__a =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__a =list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
__a =list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__a =(
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __snake_case )
from collections.abc import Callable


class Heap:
    """A generic heap supporting item updates and deletions (a max-heap by default)."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores the heap as a list of [item, score] pairs.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns index of valid parent as per desired ordering among given index and both its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last item into the freed slot and fix the index map.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change - so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top [item, score] pair from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns the top [item, score] pair from the heap and removes it as well if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
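# A short demonstration of the Heap above (a max-heap by default; pass
# key=lambda x: -x for min-heap behaviour):
if __name__ == "__main__":
    demo = Heap()
    demo.insert_item("a", 5)
    demo.insert_item("b", 2)
    demo.insert_item("c", 9)
    print(demo.get_top())      # ['c', 9]
    demo.update_item("c", 1)
    print(demo.extract_top())  # ['a', 5]
    print(demo.get_top())      # ['b', 2]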
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
_lowerCAmelCase : Optional[Any] = "Hello world! cécé herlolip"
_lowerCAmelCase : str = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def UpperCamelCase_( _snake_case : str , _snake_case : List[Any] ):
"""simple docstring"""
__a =BertAbsConfig(
temp_dir='.' , finetune_bert=_snake_case , large=_snake_case , share_emb=_snake_case , use_bert_emb=_snake_case , encoder='bert' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    __a =torch.load(_snake_case , lambda storage , loc : storage )
__a =AbsSummarizer(_snake_case , torch.device('cpu' ) , _snake_case )
original.eval()
__a =BertAbsSummarizer(_snake_case , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
__a =BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
__a =tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
__a =tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__a =encoder_input_ids
__a =decoder_input_ids
__a =__a =None
__a =None
__a =__a =None
__a =__a =None
__a =None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process, we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__a =original(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =original.generator(_snake_case )
__a =new_model(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =new_model.generator(_snake_case )
__a =torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('Maximum absolute difference between model outputs: {:.2f}'.format(_snake_case ) )
    __a =torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('Maximum absolute difference between generator outputs: {:.2f}'.format(_snake_case ) )
__a =torch.allclose(_snake_case , _snake_case , atol=1e-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
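# Example invocation (the script filename is assumed from this converter's
# upstream location; paths are placeholders):
#
#     python convert_bertabs_original_pytorch_checkpoint.py \
#         --bertabs_checkpoint_path /path/to/bertabs_original.pt \
#         --pytorch_dump_folder_path ./bertabs-converted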
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """simple docstring"""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f'{class_data_dir}/images', exist_ok=True)
    if len(list(Path(f'{class_data_dir}/images').iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url='https://knn.laion.ai/knn-service',
                indice_name='laion_400m',
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images', total=num_class_images)

    with open(f'{class_data_dir}/caption.txt', 'w') as f1, open(f'{class_data_dir}/urls.txt', 'w') as f2, open(
        f'{class_data_dir}/images.txt', 'w'
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'])
                if img.status_code == 200:
                    Image.open(BytesIO(img.content))  # verify the payload decodes as an image
                    with open(f'{class_data_dir}/images/{total}.jpg', 'wb') as f:
                        f.write(img.content)
                    f1.write(images['caption'] + '\n')
                    f2.write(images['url'] + '\n')
                    f3.write(f'{class_data_dir}/images/{total}.jpg' + '\n')
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=True, type=str)
    parser.add_argument('--class_data_dir', help='path to save images', required=True, type=str)
    parser.add_argument('--num_class_images', help='number of images to download', default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
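    # Illustrative invocation (added; the script filename is a placeholder):
    #   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./class_images --num_class_images 200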
| 370
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=False , __snake_case=False , __snake_case=False , __snake_case=2 , __snake_case=99 , __snake_case=0 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=2 , __snake_case=0.02 , __snake_case=2 , __snake_case=4 , __snake_case="last" , __snake_case=True , __snake_case=None , __snake_case=0 , ) -> Optional[Any]:
'''simple docstring'''
__a =parent
__a =batch_size
__a =seq_length
__a =is_training
__a =use_input_lengths
__a =use_token_type_ids
__a =use_labels
__a =gelu_activation
__a =sinusoidal_embeddings
__a =causal
__a =asm
__a =n_langs
__a =vocab_size
__a =n_special
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =max_position_embeddings
__a =type_sequence_label_size
__a =initializer_range
__a =num_labels
__a =num_choices
__a =summary_type
__a =use_proj
__a =scope
__a =bos_token_id
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a =random_attention_mask([self.batch_size, self.seq_length] )
__a =None
if self.use_input_lengths:
__a =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__a =None
if self.use_token_type_ids:
__a =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__a =None
__a =None
__a =None
if self.use_labels:
__a =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a =ids_tensor([self.batch_size] , 2 ).float()
__a =ids_tensor([self.batch_size] , self.num_choices )
__a =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]:
'''simple docstring'''
__a =XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , lengths=__snake_case , langs=__snake_case )
__a =model(__snake_case , langs=__snake_case )
__a =model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[int]:
'''simple docstring'''
__a =XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Dict:
'''simple docstring'''
__a =XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
__a =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]:
'''simple docstring'''
__a =XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((__a) , ) =result_with_labels.to_tuple()
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((__a) , ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[Any]:
'''simple docstring'''
__a =XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Any:
'''simple docstring'''
__a =self.num_labels
__a =XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Tuple:
'''simple docstring'''
__a =self.num_choices
__a =XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name) -> bool:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def __magic_name__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        '''simple docstring'''
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions))
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        '''simple docstring'''
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a =XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(__snake_case )
__a =torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
__a =[
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__a =model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
| 308
| 0
|
"""simple docstring"""
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a plain list."""

    def __init__(self, n) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
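

# Minimal usage sketch (added for illustration; it exercises the class defined above):
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(10).enqueue(20)
    print(len(queue))        # 2
    print(queue.first())     # 10
    print(queue.dequeue())   # 10
    print(queue.dequeue())   # 20
    print(queue.is_empty())  # True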
| 371
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
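

# Illustrative check (added): rotating the unit x-vector by 90 degrees should give
# (approximately) the unit y-vector, i.e. rotate(numpy.array([1, 0]), 90) ~ [0.0, 1.0].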
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 308
| 0
|
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
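

# Illustrative example (added; the numbers below were worked out by hand):
# with weights [1, 3, 4, 5], values [1, 4, 5, 7] and max_weight 7,
# knapsack(weights, values, 4, 7, 0) returns 9 (take the items of weight 3 and 4).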
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
|
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    # adds the word to every combination the current position holds
                    new_combinations = [[word, *way] for way in table[i]]
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
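

# Illustrative example (added; traced by hand through the table above):
#   all_construct("abc", ["a", "b", "c", "ab"]) -> [["ab", "c"], ["a", "b", "c"]]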
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 309
| 1
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        """simple docstring"""
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        """simple docstring"""
        return self.length

    def __getitem__(self, i):
        """simple docstring"""
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        """simple docstring"""
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        """simple docstring"""
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        """simple docstring"""
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        """simple docstring"""
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv', data_files=data_files)
    label_list = datasets['train'].unique('label')
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'], examples['sentence2'], truncation=True, max_length=None, padding='max_length')
        if 'label' in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['sentence1', 'sentence2', 'label'])

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
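

# Hedged usage sketch (added): in an Accelerate test these helpers would typically be
# driven like the snippet below; `Accelerator` comes from the `accelerate` package and
# the wiring is illustrative rather than part of the original file.
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = mocked_dataloaders(accelerator, batch_size=16)
#   model = RegressionModel(a=1, b=2)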
| 309
|
"""simple docstring"""
import re
def dna(dna: str) -> str:
    if len(re.findall('[ATCG]', dna)) != len(dna):
        raise ValueError('Invalid Strand')
    return dna.translate(dna.maketrans('ATCG', 'TAGC'))
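

# Illustrative examples (added; these mirror what doctests for this function could look like):
#   dna("ATCG")  -> "TAGC"
#   dna("AXTG")  -> raises ValueError("Invalid Strand")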
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
| 1
|
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case="auto" , _snake_case=-1 , _snake_case=0.9 , _snake_case=5 , _snake_case=5_00 , _snake_case="gpt2-large" , _snake_case=-1 , _snake_case=10_24 , _snake_case=25 , _snake_case=5 , _snake_case=True , _snake_case=25 , ):
"""simple docstring"""
lowerCAmelCase = compute_mauve(
p_text=_snake_case , q_text=_snake_case , p_features=_snake_case , q_features=_snake_case , p_tokens=_snake_case , q_tokens=_snake_case , num_buckets=_snake_case , pca_max_data=_snake_case , kmeans_explained_var=_snake_case , kmeans_num_redo=_snake_case , kmeans_max_iter=_snake_case , featurize_model_name=_snake_case , device_id=_snake_case , max_text_length=_snake_case , divergence_curve_discretization_size=_snake_case , mauve_scaling_factor=_snake_case , verbose=_snake_case , seed=_snake_case , )
return out
| 309
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
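
    # Quick sanity check (added; membership values below are hypothetical): at a point
    # where µ_young = 0.4 and µ_middle_aged = 0.6, the union is max(0.4, 0.6) = 0.6 and
    # the intersection is min(0.4, 0.6) = 0.4.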
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 309
| 1
|
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class a :
snake_case__ = True
snake_case__ = None
# Automatically constructed
snake_case__ = "PIL.Image.Image"
snake_case__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
snake_case__ = field(default='''Image''' , init=a__ , repr=a__ )
def __call__( self ):
"""simple docstring"""
return self.pa_type
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if isinstance(_snake_case , _snake_case ):
lowerCAmelCase = np.array(_snake_case )
if isinstance(_snake_case , _snake_case ):
return {"path": value, "bytes": None}
elif isinstance(_snake_case , _snake_case ):
return {"path": None, "bytes": value}
elif isinstance(_snake_case , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_snake_case )
elif isinstance(_snake_case , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_snake_case )
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.' )
if token_per_repo_id is None:
lowerCAmelCase = {}
lowerCAmelCase ,lowerCAmelCase = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(F'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(_snake_case ):
lowerCAmelCase = PIL.Image.open(_snake_case )
else:
lowerCAmelCase = path.split('::' )[-1]
try:
lowerCAmelCase = string_to_dict(_snake_case , config.HUB_DATASETS_URL )['repo_id']
lowerCAmelCase = token_per_repo_id.get(_snake_case )
except ValueError:
lowerCAmelCase = None
with xopen(_snake_case , 'rb' , use_auth_token=_snake_case ) as f:
lowerCAmelCase = BytesIO(f.read() )
lowerCAmelCase = PIL.Image.open(bytes_ )
else:
lowerCAmelCase = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def UpperCamelCase__ ( self ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
lowerCAmelCase = pa.array([None] * len(_snake_case ) , type=pa.binary() )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCAmelCase = pa.array([None] * len(_snake_case ) , type=pa.string() )
lowerCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
lowerCAmelCase = storage.field('bytes' )
else:
lowerCAmelCase = pa.array([None] * len(_snake_case ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
lowerCAmelCase = storage.field('path' )
else:
lowerCAmelCase = pa.array([None] * len(_snake_case ) , type=pa.string() )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
lowerCAmelCase = pa.array(
[encode_np_array(np.array(_snake_case ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
lowerCAmelCase = pa.array([None] * len(_snake_case ) , type=pa.string() )
lowerCAmelCase = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(_snake_case , self.pa_type )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(_snake_case ):
with xopen(_snake_case , 'rb' ) as f:
lowerCAmelCase = f.read()
return bytes_
lowerCAmelCase = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCAmelCase = pa.array(
[os.path.basename(_snake_case ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(_snake_case , self.pa_type )
def _SCREAMING_SNAKE_CASE ():
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
lowerCAmelCase = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : "PIL.Image.Image" ):
lowerCAmelCase = BytesIO()
if image.format in list_image_compression_formats():
lowerCAmelCase = image.format
else:
lowerCAmelCase = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(_UpperCAmelCase , format=_UpperCAmelCase )
return buffer.getvalue()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : "PIL.Image.Image" ):
if hasattr(_UpperCAmelCase , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCAmelCase )}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : np.ndarray ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
lowerCAmelCase = array.dtype
lowerCAmelCase = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
lowerCAmelCase = dtype.kind
lowerCAmelCase = dtype.itemsize
lowerCAmelCase = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
lowerCAmelCase = np.dtype('|u1' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
lowerCAmelCase = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
lowerCAmelCase = dtype_byteorder + dtype_kind + str(_UpperCAmelCase )
lowerCAmelCase = np.dtype(_UpperCAmelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
lowerCAmelCase = PIL.Image.fromarray(array.astype(_UpperCAmelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCAmelCase )}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if objs:
lowerCAmelCase ,lowerCAmelCase = first_non_null_value(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCAmelCase , np.ndarray ):
lowerCAmelCase = no_op_if_value_is_null(_UpperCAmelCase )
return [obj_to_image_dict_func(_UpperCAmelCase ) for obj in objs]
elif isinstance(_UpperCAmelCase , PIL.Image.Image ):
lowerCAmelCase = no_op_if_value_is_null(_UpperCAmelCase )
return [obj_to_image_dict_func(_UpperCAmelCase ) for obj in objs]
else:
return objs
else:
return objs
| 309
|
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
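

# Illustrative example (added; result worked out by hand with the greedy rule above):
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]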
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '0'
    if (
        input('Do you want to enter your denominations ? (y/n): ').strip().lower()
        == "y"
    ):
        n = int(input('Enter the number of denominations you want to add: ').strip())
        for i in range(0, n):
            denominations.append(int(input(f'Denomination {i}: ').strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('Enter the change you want to make: ').strip()
    if int(value) == 0 or int(value) < 0:
        print('The total value cannot be zero or negative.')
    else:
        print(f'Following is minimal change for {value}: ')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=' ')
| 309
| 1
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {'facebook/bart-base': BartForConditionalGeneration}
tokenizer_dict = {'facebook/bart-base': BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.')
    parser.add_argument(
        '--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
    parser.add_argument(
        '--max_length', type=int, default=5, help='The maximum total input sequence length after tokenization.',
    )
    parser.add_argument(
        '--num_beams', type=int, default=None, help=(
            'Number of beams to use for evaluation. This argument will be '
            'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
        ),
    )
    parser.add_argument(
        '--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=True,
    )
    parser.add_argument(
        '--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name',
    )
    parser.add_argument(
        '--device', type=str, default='cpu', help='Device where the model will be run',
    )
    parser.add_argument('--output_file_path', type=str, default=None, help='Where to store the final ONNX file.')
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device='cpu'):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ['facebook/bart-base']:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = 'My friends are cool but they eat too many carbs.'
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt').to(model.device)

        summary_ids = model.generate(
            inputs['input_ids'], attention_mask=inputs['attention_mask'], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id, )

        torch.onnx.export(
            bart_script_model, (
                inputs['input_ids'],
                inputs['attention_mask'],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'], output_names=['output_ids'], dynamic_axes={
                'input_ids': {0: 'batch', 1: 'seq'},
                'output_ids': {0: 'batch', 1: 'seq_out'},
            }, example_outputs=summary_ids, )

        logger.info('Model exported to {}'.format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info('Deduplicated and optimized model written to {}'.format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None, {
                'input_ids': inputs['input_ids'].cpu().numpy(),
                'attention_mask': inputs['attention_mask'].cpu().numpy(),
                'num_beams': np.array(num_beams),
                'max_length': np.array(max_length),
                'decoder_start_token_id': np.array(model.config.decoder_start_token_id),
            }, )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info('Model outputs from torch and ONNX Runtime are similar.')
        logger.info('Success.')
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = 'BART.onnx'

    logger.info('Exporting model to ONNX')
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
| 309
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
def __init__( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = 'gelu'
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 5_12
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case )
lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCAmelCase = model.get_bias()
assert isinstance(_snake_case , _snake_case )
for k, v in name.items():
assert isinstance(_snake_case , tf.Variable )
else:
lowerCAmelCase = model.get_output_embeddings()
assert x is None
lowerCAmelCase = model.get_bias()
assert name is None
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(_snake_case )[0]
lowerCAmelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(_snake_case )[0]
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCamelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCamelCase : Optional[Any] = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int]=8 ):
lowerCAmelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowerCAmelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
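# Worked example (illustrative values, not from the original source): with the
# default scale_factor=8, a 768x768 request gives 768 // 8**2 = 12 with no
# remainder, so the function returns (12 * 8, 12 * 8) == (96, 96) -- the latent
# resolution handed to the UNet before the movq decoder upsamples the image.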
class a ( a__ ):
def __init__( self , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=_snake_case , scheduler=_snake_case , movq=_snake_case , )
lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if latents is None:
lowerCAmelCase = randn_tensor(_snake_case , generator=_snake_case , device=_snake_case , dtype=_snake_case )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCAmelCase = latents.to(_snake_case )
lowerCAmelCase = latents * scheduler.init_noise_sigma
return latents
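# (Scaling by scheduler.init_noise_sigma puts fresh noise at the variance the
# first denoising step expects; for DDPMScheduler this value is 1.0, so the
# multiplication is effectively a no-op there.)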
def UpperCamelCase__ ( self , _snake_case=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCAmelCase = torch.device(F'cuda:{gpu_id}' )
lowerCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_snake_case , _snake_case )
def UpperCamelCase__ ( self , _snake_case=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowerCAmelCase = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_snake_case )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCAmelCase ,lowerCAmelCase = cpu_offload_with_hook(_snake_case , _snake_case , prev_module_hook=_snake_case )
# We'll offload the last model manually.
lowerCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_snake_case , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_snake_case )
def __call__( self , _snake_case , _snake_case , _snake_case = 5_12 , _snake_case = 5_12 , _snake_case = 1_00 , _snake_case = 4.0 , _snake_case = 1 , _snake_case = None , _snake_case = None , _snake_case = "pil" , _snake_case = True , ):
"""simple docstring"""
lowerCAmelCase = self._execution_device
lowerCAmelCase = guidance_scale > 1.0
if isinstance(_snake_case , _snake_case ):
lowerCAmelCase = torch.cat(_snake_case , dim=0 )
lowerCAmelCase = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_snake_case , _snake_case ):
lowerCAmelCase = torch.cat(_snake_case , dim=0 )
if do_classifier_free_guidance:
lowerCAmelCase = image_embeds.repeat_interleave(_snake_case , dim=0 )
lowerCAmelCase = negative_image_embeds.repeat_interleave(_snake_case , dim=0 )
lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_snake_case )
self.scheduler.set_timesteps(_snake_case , device=_snake_case )
lowerCAmelCase = self.scheduler.timesteps
lowerCAmelCase = self.unet.config.in_channels
lowerCAmelCase ,lowerCAmelCase = downscale_height_and_width(_snake_case , _snake_case , self.movq_scale_factor )
# create initial latent
lowerCAmelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _snake_case , _snake_case , _snake_case , self.scheduler , )
for i, t in enumerate(self.progress_bar(_snake_case ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase = {'image_embeds': image_embeds}
lowerCAmelCase = self.unet(
sample=_snake_case , timestep=_snake_case , encoder_hidden_states=_snake_case , added_cond_kwargs=_snake_case , return_dict=_snake_case , )[0]
if do_classifier_free_guidance:
lowerCAmelCase ,lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
lowerCAmelCase ,lowerCAmelCase = noise_pred.chunk(2 )
lowerCAmelCase ,lowerCAmelCase = variance_pred.chunk(2 )
lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCAmelCase ,lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase = self.scheduler.step(
_snake_case , _snake_case , _snake_case , generator=_snake_case , )[0]
# post-processing
lowerCAmelCase = self.movq.decode(_snake_case , force_not_quantize=_snake_case )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
lowerCAmelCase = image * 0.5 + 0.5
lowerCAmelCase = image.clamp(0 , 1 )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case )
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Dict = '''▁'''
__UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
__UpperCamelCase : str = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
__UpperCamelCase : Optional[Any] = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
__UpperCamelCase : str = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = ["input_ids"]
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = RESOURCE_FILES_NAMES
def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = sentencepiece_model_ckpt
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCAmelCase = self.load_vocab(filepath=_snake_case )
else:
lowerCAmelCase = {self.sp_model.id_to_piece(_snake_case ): id for id in range(self.sp_model.get_piece_size() )}
lowerCAmelCase = {v: k for k, v in self.vocab.items()}
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if text is None:
return None
lowerCAmelCase = self.tokenize(_snake_case )
lowerCAmelCase ,lowerCAmelCase = '', []
for i, ch in enumerate(_snake_case ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case )
else:
lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case )
if self.is_whitespace(_snake_case ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_snake_case ) )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase = token[1:]
lowerCAmelCase = text[offset:].index(_snake_case ) + offset
lowerCAmelCase = start + len(_snake_case )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase = end
return token_mapping
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) )
def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowerCAmelCase = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case )
else:
lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = []
for pi, piece in enumerate(_snake_case ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_snake_case ) and pi != 0:
new_pieces.append(_snake_case )
continue
else:
continue
lowerCAmelCase = 0
for i, chunk in enumerate(_snake_case ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_snake_case )
lowerCAmelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
if len(_snake_case ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.convert_ids_to_tokens(_snake_case )
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.reverse_vocab.get(_snake_case , self.unk_token )
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if token_ids_b is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_b + _sep
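# Illustration with assumed ids: token_ids_a=[5, 6] and token_ids_b=[7] yield
# [cls, 5, 6, sep, sep, 7, sep] -- ERNIE-M joins a pair with two [SEP] tokens
# instead of BERT's single separator.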
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if offset_mapping_b is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_b + [(0, 0)]
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_b is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_b is not None:
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
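# Example: a single sequence of length 2 gives [1, 0, 0, 1]; a pair of lengths
# (2, 3) gives [1, 0, 0, 1, 1, 0, 0, 0, 1], marking the [CLS]/[SEP] positions.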
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if token_ids_b is None:
# [CLS] X [SEP]
return (len(_snake_case ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3)
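# Example: len(token_ids_a)=2 and len(token_ids_b)=3 produce [0]*3 + [1]*6,
# i.e. type 0 covers "[CLS] A" and type 1 covers "[SEP] [SEP] B [SEP]",
# matching the layout sketched in the comments above.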
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_snake_case ) == 1:
lowerCAmelCase = unicodedata.category(_snake_case )
if cat == "Zs":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = {}
with io.open(_snake_case , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_snake_case ):
lowerCAmelCase = line.rstrip('\n' )
lowerCAmelCase = int(_snake_case )
return token_to_idx
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = 0
if os.path.isdir(_snake_case ):
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(_snake_case , 'w' , encoding='utf-8' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase = token_index
writer.write(token + '\n' )
index += 1
lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' )
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (vocab_file,)
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class a ( a__ ):
snake_case__ = 42
class a ( a__ , a__ ):
@register_to_config
def __init__( self , _snake_case = 6_55_36 , _snake_case = None , _snake_case = 2 , _snake_case = 2 , _snake_case = 0 , _snake_case = "fourier" , _snake_case = True , _snake_case = False , _snake_case = 0.0 , _snake_case = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _snake_case = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _snake_case = "UNetMidBlock1D" , _snake_case = None , _snake_case = (32, 32, 64) , _snake_case = None , _snake_case = 8 , _snake_case = 1 , _snake_case = False , ):
"""simple docstring"""
super().__init__()
lowerCAmelCase = sample_size
# time
if time_embedding_type == "fourier":
lowerCAmelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_snake_case , log=_snake_case , flip_sin_to_cos=_snake_case )
lowerCAmelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowerCAmelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_snake_case , downscale_freq_shift=_snake_case )
lowerCAmelCase = block_out_channels[0]
if use_timestep_embedding:
lowerCAmelCase = block_out_channels[0] * 4
lowerCAmelCase = TimestepEmbedding(
in_channels=_snake_case , time_embed_dim=_snake_case , act_fn=_snake_case , out_dim=block_out_channels[0] , )
lowerCAmelCase = nn.ModuleList([] )
lowerCAmelCase = None
lowerCAmelCase = nn.ModuleList([] )
lowerCAmelCase = None
# down
lowerCAmelCase = in_channels
for i, down_block_type in enumerate(_snake_case ):
lowerCAmelCase = output_channel
lowerCAmelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowerCAmelCase = i == len(_snake_case ) - 1
lowerCAmelCase = get_down_block(
_snake_case , num_layers=_snake_case , in_channels=_snake_case , out_channels=_snake_case , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_snake_case )
# mid
lowerCAmelCase = get_mid_block(
_snake_case , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_snake_case , add_downsample=_snake_case , )
# up
lowerCAmelCase = list(reversed(_snake_case ) )
lowerCAmelCase = reversed_block_out_channels[0]
if out_block_type is None:
lowerCAmelCase = out_channels
else:
lowerCAmelCase = block_out_channels[0]
for i, up_block_type in enumerate(_snake_case ):
lowerCAmelCase = output_channel
lowerCAmelCase = (
reversed_block_out_channels[i + 1] if i < len(_snake_case ) - 1 else final_upsample_channels
)
lowerCAmelCase = i == len(_snake_case ) - 1
lowerCAmelCase = get_up_block(
_snake_case , num_layers=_snake_case , in_channels=_snake_case , out_channels=_snake_case , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_snake_case )
lowerCAmelCase = output_channel
# out
lowerCAmelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowerCAmelCase = get_out_block(
out_block_type=_snake_case , num_groups_out=_snake_case , embed_dim=block_out_channels[0] , out_channels=_snake_case , act_fn=_snake_case , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = True , ):
"""simple docstring"""
lowerCAmelCase = timestep
if not torch.is_tensor(_snake_case ):
lowerCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(_snake_case ) and len(timesteps.shape ) == 0:
lowerCAmelCase = timesteps[None].to(sample.device )
lowerCAmelCase = self.time_proj(_snake_case )
if self.config.use_timestep_embedding:
lowerCAmelCase = self.time_mlp(_snake_case )
else:
lowerCAmelCase = timestep_embed[..., None]
lowerCAmelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowerCAmelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowerCAmelCase = ()
for downsample_block in self.down_blocks:
lowerCAmelCase ,lowerCAmelCase = downsample_block(hidden_states=_snake_case , temb=_snake_case )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowerCAmelCase = self.mid_block(_snake_case , _snake_case )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowerCAmelCase = down_block_res_samples[-1:]
lowerCAmelCase = down_block_res_samples[:-1]
lowerCAmelCase = upsample_block(_snake_case , res_hidden_states_tuple=_snake_case , temb=_snake_case )
# 5. post-process
if self.out_block:
lowerCAmelCase = self.out_block(_snake_case , _snake_case )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_snake_case )
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__UpperCamelCase : int = ['''small''', '''medium''', '''large''']
__UpperCamelCase : str = '''lm_head.decoder.weight'''
__UpperCamelCase : Dict = '''lm_head.weight'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = torch.load(_UpperCAmelCase )
lowerCAmelCase = d.pop(_UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
__UpperCamelCase : Optional[int] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__UpperCamelCase : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
__UpperCamelCase : str = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_ops.main )
"""simple docstring"""
__UpperCamelCase : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__UpperCamelCase : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
order.append(_UpperCAmelCase )
return order
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return component
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] ):
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
lowerCAmelCase = {vert: [] for vert in range(len(_UpperCAmelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_UpperCAmelCase )
lowerCAmelCase = []
for i, was_visited in enumerate(_UpperCAmelCase ):
if not was_visited:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = []
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
for i in range(len(_UpperCAmelCase ) ):
lowerCAmelCase = order[len(_UpperCAmelCase ) - i - 1]
if not visited[vert]:
lowerCAmelCase = find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
components_list.append(_UpperCAmelCase )
return components_list
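# Sanity check (Kosaraju's two-pass scheme) on the first sample graph above,
# {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}: the cycle 0 -> 2 -> 1 -> 0
# collapses into one component, so the expected output is [[0, 1, 2], [3], [4]].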
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class a ( a__ ):
snake_case__ = '''yolos'''
def __init__( self , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=[5_12, 8_64] , _snake_case=16 , _snake_case=3 , _snake_case=True , _snake_case=1_00 , _snake_case=True , _snake_case=False , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=5 , _snake_case=2 , _snake_case=0.1 , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case )
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = qkv_bias
lowerCAmelCase = num_detection_tokens
lowerCAmelCase = use_mid_position_embeddings
lowerCAmelCase = auxiliary_loss
# Hungarian matcher
lowerCAmelCase = class_cost
lowerCAmelCase = bbox_cost
lowerCAmelCase = giou_cost
# Loss coefficients
lowerCAmelCase = bbox_loss_coefficient
lowerCAmelCase = giou_loss_coefficient
lowerCAmelCase = eos_coefficient
class a ( a__ ):
snake_case__ = version.parse('''1.11''' )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 1E-4
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 12
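# (The two properties above are presumably the ONNX validation tolerance,
# atol=1e-4, and the default opset version, 12, read by the transformers.onnx
# export machinery.)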
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class a :
snake_case__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
snake_case__ = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
snake_case__ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case__ = field(
default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.task_name.lower()
class a ( a__ ):
snake_case__ = '''train'''
snake_case__ = '''dev'''
snake_case__ = '''test'''
class a ( a__ ):
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
def __init__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = Split.train , _snake_case = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _snake_case , )
lowerCAmelCase = args
lowerCAmelCase = glue_processors[args.task_name]()
lowerCAmelCase = glue_output_modes[args.task_name]
if isinstance(_snake_case , _snake_case ):
try:
lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
lowerCAmelCase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1]
lowerCAmelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCAmelCase = cached_features_file + '.lock'
with FileLock(_snake_case ):
if os.path.exists(_snake_case ) and not args.overwrite_cache:
lowerCAmelCase = time.time()
lowerCAmelCase = torch.load(_snake_case )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCAmelCase = self.processor.get_test_examples(args.data_dir )
else:
lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCAmelCase = examples[:limit_length]
lowerCAmelCase = glue_convert_examples_to_features(
_snake_case , _snake_case , max_length=args.max_seq_length , label_list=_snake_case , output_mode=self.output_mode , )
lowerCAmelCase = time.time()
torch.save(self.features , _snake_case )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _snake_case ):
"""simple docstring"""
return self.features[i]
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.label_list
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ):
for attribute in key.split('.' ):
lowerCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase )
if weight_type is not None:
lowerCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase ).shape
else:
lowerCAmelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ):
lowerCAmelCase = []
lowerCAmelCase = fairseq_model.state_dict()
lowerCAmelCase = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
lowerCAmelCase = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
lowerCAmelCase = True
if "*" in mapped_key:
lowerCAmelCase = name.split(_UpperCAmelCase )[0].split('.' )[-2]
lowerCAmelCase = mapped_key.replace('*' , _UpperCAmelCase )
if "weight_g" in name:
lowerCAmelCase = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase = 'weight_v'
elif "weight" in name:
lowerCAmelCase = 'weight'
elif "bias" in name:
lowerCAmelCase = 'bias'
else:
lowerCAmelCase = None
set_recursively(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
continue
if not is_used:
unused_weights.append(_UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
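# Example of the mapping logic above (hypothetical key): a fairseq weight named
# "encoder.layers.3.fc1.weight" matches the "fc1" entry in MAPPING, "*" is
# replaced by the layer index "3", and the value is copied onto
# "encoder.layers.3.feed_forward.intermediate_dense" (prefixed with "hubert."
# for fine-tuned CTC checkpoints) with weight_type "weight".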
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] ):
lowerCAmelCase = full_name.split('conv_layers.' )[-1]
lowerCAmelCase = name.split('.' )
lowerCAmelCase = int(items[0] )
lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowerCAmelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowerCAmelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
lowerCAmelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
lowerCAmelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_UpperCAmelCase )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Any=True ):
if config_path is not None:
lowerCAmelCase = HubertConfig.from_pretrained(_UpperCAmelCase )
else:
lowerCAmelCase = HubertConfig()
if is_finetuned:
if dict_path:
lowerCAmelCase = Dictionary.load(_UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCAmelCase = target_dict.pad_index
lowerCAmelCase = target_dict.bos_index
lowerCAmelCase = target_dict.eos_index
lowerCAmelCase = len(target_dict.symbols )
lowerCAmelCase = os.path.join(_UpperCAmelCase , 'vocab.json' )
if not os.path.isdir(_UpperCAmelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_UpperCAmelCase ) )
return
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , _UpperCAmelCase )
lowerCAmelCase = WavaVecaCTCTokenizer(
_UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_UpperCAmelCase , )
lowerCAmelCase = True if config.feat_extract_norm == 'layer' else False
lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
lowerCAmelCase = WavaVecaProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
lowerCAmelCase = HubertForCTC(_UpperCAmelCase )
else:
lowerCAmelCase = HubertModel(_UpperCAmelCase )
if is_finetuned:
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCAmelCase = model[0].eval()
recursively_load_weights(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
hf_wavavec.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__UpperCamelCase : List[str] = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
"""simple docstring"""
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ):
lowerCAmelCase = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_UpperCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase ).lstrip('./' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
return F'{i * " "}*' if i else "\n##"
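# e.g. md_prefix(0) -> "\n##" (a new section heading) while md_prefix(2) -> "  *"
# (a bullet indented two spaces), per the literal above.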
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_UpperCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F'{md_prefix(_UpperCAmelCase )} {new_part.replace("_" , " " ).title()}' )
return new_path
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
lowerCAmelCase = ''
for filepath in sorted(good_file_paths(_UpperCAmelCase ) ):
lowerCAmelCase ,lowerCAmelCase = os.path.split(_UpperCAmelCase )
if filepath != old_path:
lowerCAmelCase = print_path(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0
lowerCAmelCase = F'{filepath}/{filename}'.replace(' ' , '%20' )
lowerCAmelCase = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F'{md_prefix(_UpperCAmelCase )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = 8
# DPR tok
lowerCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCAmelCase = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(_snake_case , exist_ok=_snake_case )
lowerCAmelCase = os.path.join(_snake_case , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
lowerCAmelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
lowerCAmelCase = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
lowerCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCAmelCase = {'unk_token': '<unk>'}
lowerCAmelCase = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(_snake_case , exist_ok=_snake_case )
lowerCAmelCase = os.path.join(_snake_case , BART_VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase = os.path.join(_snake_case , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_snake_case ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_dataset()
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
lowerCAmelCase = dataset
lowerCAmelCase = RagRetriever(
_snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_dataset()
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
if from_disk:
lowerCAmelCase = os.path.join(self.tmpdirname , 'dataset' )
lowerCAmelCase = os.path.join(self.tmpdirname , 'index.faiss' )
dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
dataset.drop_index('embeddings' )
dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
del dataset
lowerCAmelCase = RagRetriever(
_snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowerCAmelCase = RagRetriever(
_snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _snake_case ) , )
return retriever
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCAmelCase = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
lowerCAmelCase = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
lowerCAmelCase = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
pickle.dump(_snake_case , open(_snake_case , 'wb' ) )
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
lowerCAmelCase = RagRetriever(
_snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , _snake_case )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
lowerCAmelCase = self.get_dummy_dataset()
retriever.save_pretrained(_snake_case )
lowerCAmelCase = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , _snake_case )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
lowerCAmelCase = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , _snake_case )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
lowerCAmelCase = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_legacy_index_retriever()
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) , _snake_case )
self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
lowerCAmelCase = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCamelCase__ ( self ):
"""simple docstring"""
import torch
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)
        out = retriever(
            question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors='pt')
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
            out['doc_ids'],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCamelCase__ ( self ):
"""simple docstring"""
        ctx_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)  # assuming the in-memory dummy index variant
        retriever.set_ctx_encoder_tokenizer(ctx_encoder_tokenizer)
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        self.assertEqual(
            len(out), 6)  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask')), True)  # check for doc token related keys in dictionary.
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
__UpperCamelCase : int = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
"""simple docstring"""
def solution(n: int = 100):
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    '''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
    '''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
    '''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutlmv2_fast'''] = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_layoutlmv2'''] = ['''LayoutLMv2FeatureExtractor''']
    _import_structure['''image_processing_layoutlmv2'''] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_layoutlmv2'''] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxBertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        model = FlaxBertModel.from_pretrained('bert-base-cased')
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__UpperCamelCase : Optional[Any] = '''scheduler_config.json'''
class FlaxKarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    DPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ['dtype']
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """simple docstring"""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, **kwargs)
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)
        if hasattr(scheduler, 'create_state') and getattr(scheduler, 'has_state', False):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """simple docstring"""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """simple docstring"""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        """simple docstring"""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
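# Example: x of shape (4,) broadcast against shape (4, 3, 8, 8) is reshaped to
# (4, 1, 1, 1) first, so each per-sample scalar multiplies a whole image.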
def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta: float = 0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
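# A minimal usage sketch (assuming 1000 diffusion steps, a common default):
#   betas = betas_for_alpha_bar(1000)   # shape (1000,), beta_t = 1 - alpha_bar(t2) / alpha_bar(t1)
# This is the "squaredcos_cap_v2" (Glide cosine) schedule referenced below.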
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """simple docstring"""
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}')
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)
        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
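# The two helpers above implement the standard DDPM forward process and v-prediction
# target:
#   noisy = sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * eps
#   v     = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x0
# A minimal sketch (hypothetical shapes): with x0 of shape (B, C, H, W) and integer
# timesteps of shape (B,), add_noise_common broadcasts the per-timestep scalars over
# each sample before mixing.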
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
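# e.g. evaluate("Helxo Worlb", "Hello World") scores 9.0: nine of the eleven
# characters already match the target position-by-position.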
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
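# e.g. crossover("AAAA", "BBBB") with random_slice == 2 yields ("AABB", "BBAA"):
# each child takes the head of one parent and the tail of the other.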
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = F'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                F'\nGeneration: {generation}'
                F'\nTotal Population: {total_population}'
                F'\nBest score: {population_score[0][1]}'
                F'\nBest string: {population_score[0][0]}')
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
    )
    genes_list = list(
        ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
        '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
    )
    generation, population, target = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''camembert-base''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs=None, **kwargs):
        """simple docstring"""
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
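    # e.g. a single sequence is wrapped as "<s> A </s>" and a pair as
    # "<s> A </s></s> B </s>" (the RoBERTa-style pair format built above).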
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
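    # Ids 0-3 are reserved for the fairseq special tokens above, so every
    # sentencepiece id is shifted by self.fairseq_offset (4): piece id 5 -> token id 9.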
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
return (out_vocab_file,)
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        """simple docstring"""
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
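    # stretch() below is histogram equalization: build the grayscale histogram,
    # accumulate it into a CDF (self.sk), scale by (L - 1) to get a lookup table
    # of output gray levels, then remap every pixel through that table.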
    def stretch(self, input_file):
        """simple docstring"""
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase = self.last_list[num]
        cv2.imwrite('output_data/output.jpg', self.img)
    def plot_histogram(self):
        """simple docstring"""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """simple docstring"""
        cv2.imshow('Output-Image', self.img)
        cv2.imshow('Input-Image', self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    engine_name = '''temp_engine/bert-fp16.engine'''
if args.int8:
    engine_name = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
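# Binding layout assumed above: d_inputs holds the three int32 device buffers
# (input_ids, attention_mask, token_type_ids) and the last two bindings are the
# start/end logit outputs, copied back into the pinned host buffers h_output0/1.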
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['''validation'''].column_names
question_column_name = '''question''' if '''question''' in column_names else column_names[0]
context_column_name = '''context''' if '''context''' in column_names else column_names[1]
answer_column_name = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lots of space). So we remove that
# left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation='only_second' if pad_on_right else 'only_first', max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding='max_length', )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []
    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]
return tokenized_examples
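# e.g. one long context split into three overlapping features yields an
# overflow_to_sample_mapping like [0, 0, 0, 1, ...]: the first three features all
# map back to example 0, which is how example_id is propagated above.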
eval_examples = raw_datasets['''validation''']
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=stage, )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
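    # e.g. a (8, 384) float32 binding occupies 8 * 384 * 4 = 12_288 bytes:
    # nbytes = number of elements (trt.volume) times bytes per element (itemsize).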
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
    logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
    logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
    logger.info('''Total Number of Inference = %d''', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'''Evaluation metrics: {eval_metric}''')
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
    'split_info', [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='my_dataset')])
def test_asdict_splitdict(split_info: SplitInfo):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'train': split_info}))
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
"""simple docstring"""
ENERGY_CONVERSION: dict[str, float] = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 418_6800.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.35_58_18,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F'Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'
            F'Valid values are: {", ".join(ENERGY_CONVERSION)}'
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
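# All factors are joules-per-unit, so every conversion goes through joules:
#   energy_conversion("kilowatthour", "joule", 1.0) == 1.0 * 3_600_000 / 1.0 == 3_600_000.0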
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCamelCase : Any = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ):
        """simple docstring"""
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class a ( a__ , unittest.TestCase ):
snake_case__ = MobileViTImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = MobileViTImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , 'do_resize' ) )
self.assertTrue(hasattr(_snake_case , 'size' ) )
self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'do_flip_channel_order' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
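# --------------------------------------------------------------------------
# Hedged usage sketch mirroring the tests above. The checkpoint name is the
# public apple/mobilevit-small model; the expected shape comment assumes its
# default preprocessing config (a 256x256 center crop).
if __name__ == "__main__":
    from PIL import Image

    processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
    demo_image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
    pixel_values = processor(images=demo_image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 256, 256]) under the defaults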
| 309
| 1
|
"""simple docstring"""
import enum
import shutil
import sys
__UpperCamelCase ,__UpperCamelCase : str = shutil.get_terminal_size()
__UpperCamelCase : List[Any] = {'''UP''': '''A''', '''DOWN''': '''B''', '''RIGHT''': '''C''', '''LEFT''': '''D'''}
class a ( enum.Enum ):
snake_case__ = 0
snake_case__ = 1
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int]="" ):
sys.stdout.write(str(_UpperCAmelCase ) + end )
sys.stdout.flush()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict="" ):
forceWrite(F'\u001b[{color}m{content}\u001b[0m' , _UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ():
forceWrite('\r' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : str ):
forceWrite(F'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def _SCREAMING_SNAKE_CASE ():
forceWrite(' ' * TERMINAL_WIDTH )
reset_cursor()
def _SCREAMING_SNAKE_CASE ():
reset_cursor()
forceWrite('-' * TERMINAL_WIDTH )
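# --------------------------------------------------------------------------
# Hedged sketch: a standalone demonstration of the ANSI escape codes the
# helpers above rely on. `\u001b[31m` selects red, `\u001b[0m` resets, and
# `\033[1A` moves the cursor up one line -- the same "A"/"B"/"C"/"D"
# suffixes stored in CURSOR_TO_CHAR. Exact rendering depends on the
# terminal emulator.
if __name__ == "__main__":
    sys.stdout.write("\u001b[31mred text\u001b[0m\n")  # colored write + reset
    sys.stdout.write("first line\nsecond line\n")
    sys.stdout.write("\033[1A")                    # move the cursor up one line
    sys.stdout.write("\r" + " " * TERMINAL_WIDTH)  # blank the whole line
    sys.stdout.write("\roverwritten\n")            # rewrite it in place
    sys.stdout.flush()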
| 309
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCamelCase__ ( self ):
"""simple docstring"""
debug_launcher(test_ops.main )
| 309
| 1
|
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase : Optional[int] = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
__UpperCamelCase : Union[str, Any] = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`\'warn\'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
__UpperCamelCase : List[str] = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=None , _snake_case=1 , _snake_case="binary" , _snake_case=None , _snake_case="warn" , ):
"""simple docstring"""
lowerCAmelCase = recall_score(
_snake_case , _snake_case , labels=_snake_case , pos_label=_snake_case , average=_snake_case , sample_weight=_snake_case , zero_division=_snake_case , )
return {"recall": float(_snake_case ) if score.size == 1 else score}
| 309
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[float]] ):
lowerCAmelCase = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCAmelCase = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
lowerCAmelCase = [[0.0, 0.0], [0.0, 0.0]]
lowerCAmelCase ,lowerCAmelCase = matrix[1][1], matrix[0][0]
lowerCAmelCase ,lowerCAmelCase = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_UpperCAmelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
        # Calculate the determinant of the matrix using Sarrus' rule
lowerCAmelCase = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
lowerCAmelCase = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowerCAmelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowerCAmelCase = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowerCAmelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowerCAmelCase = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowerCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowerCAmelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowerCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
lowerCAmelCase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
lowerCAmelCase = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowerCAmelCase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
# Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
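# --------------------------------------------------------------------------
# Hedged sanity check of the hand-rolled path against numpy.linalg.inv. For
# [[2.0, 5.0], [1.0, 3.0]] the determinant is 2*3 - 1*5 = 1, so the inverse
# is [[3.0, -5.0], [-1.0, 2.0]].
if __name__ == "__main__":
    import numpy as np

    demo = [[2.0, 5.0], [1.0, 3.0]]
    print(np.linalg.inv(np.array(demo)))  # [[ 3. -5.]  [-1.  2.]]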
| 309
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ):
# Initialise PyTorch model
lowerCAmelCase = BertConfig.from_json_file(_UpperCAmelCase )
print(F'Building PyTorch model from configuration: {config}' )
lowerCAmelCase = BertForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , _UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
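# --------------------------------------------------------------------------
# Hedged usage note: an invocation of this conversion script might look like
# the following (the script filename and all paths are hypothetical
# placeholders; the flags are the ones declared by the argparse parser
# above):
#
#   python convert_bert_tf_checkpoint.py \
#       --tf_checkpoint_path /path/to/bert_model.ckpt \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin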
| 309
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Dict = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
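# --------------------------------------------------------------------------
# Hedged sketch of the idea behind _LazyModule: defer heavy imports until an
# attribute is first accessed, so `import <package>` stays cheap even when
# optional dependencies (here: torch) are installed. This is a stripped-down
# stand-in using the module-level __getattr__ hook (PEP 562), not the actual
# _LazyModule implementation; it only works at the top level of a module.
import importlib

_LAZY_ATTRS = {"sqrt": "math"}  # attribute name -> module that provides it

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")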
| 309
| 1
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a ( a__ ):
snake_case__ = (DPMSolverSinglestepScheduler,)
snake_case__ = (('''num_inference_steps''', 2_5),)
def UpperCamelCase__ ( self , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = {
'num_train_timesteps': 10_00,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**_snake_case )
return config
def UpperCamelCase__ ( self , _snake_case=0 , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('num_inference_steps' , _snake_case )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config(**_snake_case )
lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
lowerCAmelCase = scheduler_class.from_pretrained(_snake_case )
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase ,lowerCAmelCase = sample, sample
for t in range(_snake_case , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
lowerCAmelCase = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , _snake_case=0 , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('num_inference_steps' , _snake_case )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
lowerCAmelCase = scheduler_class.from_pretrained(_snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
lowerCAmelCase = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , _snake_case=None , **_snake_case ):
"""simple docstring"""
if scheduler is None:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**_snake_case )
lowerCAmelCase = scheduler_class(**_snake_case )
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**_snake_case )
lowerCAmelCase = scheduler_class(**_snake_case )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowerCAmelCase = 50
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ).prev_sample
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.2_574 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowerCAmelCase = self.full_loop(scheduler=_snake_case )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.2_791 ) < 1E-3
lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase = self.full_loop(scheduler=_snake_case )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.2_791 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=_snake_case )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , algorithm_type='dpmsolver++' , solver_order=_snake_case , solver_type=_snake_case , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , )
lowerCAmelCase = self.full_loop(
solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , )
assert not torch.isnan(_snake_case ).any(), "Samples have nan numbers"
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.check_over_configs(lower_order_final=_snake_case )
self.check_over_configs(lower_order_final=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.check_over_configs(variance_type=_snake_case )
self.check_over_configs(variance_type='learned_range' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=_snake_case , time_step=0 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.full_loop()
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.2_791 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.full_loop(use_karras_sigmas=_snake_case )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.2_248 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.full_loop(prediction_type='v_prediction' )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.1_453 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=_snake_case )
lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.0_649 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(thresholding=_snake_case , dynamic_thresholding_ratio=0 )
lowerCAmelCase = scheduler_class(**_snake_case )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(_snake_case , _snake_case )
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ).prev_sample
assert sample.dtype == torch.floataa
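# --------------------------------------------------------------------------
# Hedged usage sketch: the from_config round-trips exercised above are the
# same mechanism used to swap a pipeline's scheduler in diffusers. The
# checkpoint name is illustrative and the call downloads model weights.
if __name__ == "__main__":
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)
    image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]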
| 309
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase : Optional[int] = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase : str = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
__UpperCamelCase : Any = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = ['''input_ids''', '''attention_mask''']
snake_case__ = DistilBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _snake_case ) != do_lower_case
or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars
):
lowerCAmelCase = getattr(_snake_case , normalizer_state.pop('type' ) )
lowerCAmelCase = do_lower_case
lowerCAmelCase = strip_accents
lowerCAmelCase = tokenize_chinese_chars
lowerCAmelCase = normalizer_class(**_snake_case )
lowerCAmelCase = do_lower_case
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
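# --------------------------------------------------------------------------
# Hedged usage sketch of the fast tokenizer defined above, against the
# public distilbert-base-uncased checkpoint. Exact ids depend on the
# vocabulary, so only the output structure is noted.
if __name__ == "__main__":
    from transformers import DistilBertTokenizerFast

    tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    encoded = tokenizer("Hello world", "How are you?")
    print(encoded["input_ids"])       # [CLS] ... [SEP] ... [SEP] as ids
    print(encoded["attention_mask"])  # all ones, since nothing is padded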
| 309
| 1
|
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] , _UpperCAmelCase : list[int] , _UpperCAmelCase : list[int] , _UpperCAmelCase : list[list[str]] , _UpperCAmelCase : int , ):
lowerCAmelCase = len(_UpperCAmelCase )
    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(_UpperCAmelCase ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain the same column value, because
        # if it does there is a vertical collision. Then we apply the two formulas
        # we learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b
        #
        # and verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we
        # continue to the next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # Otherwise we call the depth_first_search function again with updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _UpperCAmelCase , _UpperCAmelCase , )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
lowerCAmelCase = []
depth_first_search([] , [] , [] , _UpperCAmelCase , _UpperCAmelCase )
# Print all the boards
for board in boards:
for column in board:
print(_UpperCAmelCase )
print('' )
print(len(_UpperCAmelCase ) , 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
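    # ----------------------------------------------------------------------
    # Hedged sketch: a tiny check of the two diagonal formulas used above.
    # Two queens share a "/" diagonal iff row + col matches, and a "\"
    # diagonal iff row - col matches; e.g. (0, 1) and (1, 0) collide on the
    # "/" diagonal because 0 + 1 == 1 + 0.
    def same_diagonal(queen_a: tuple[int, int], queen_b: tuple[int, int]) -> bool:
        (row_a, col_a), (row_b, col_b) = queen_a, queen_b
        return row_a + col_a == row_b + col_b or row_a - col_a == row_b - col_b

    print(same_diagonal((0, 1), (1, 0)))  # True  ("/" diagonal)
    print(same_diagonal((0, 0), (2, 2)))  # True  ("\" diagonal)
    print(same_diagonal((0, 0), (1, 2)))  # False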
| 309
|
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None ):
lowerCAmelCase = word_bank or []
# create a table
lowerCAmelCase = len(_UpperCAmelCase ) + 1
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
table.append([] )
# seed value
lowerCAmelCase = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(_UpperCAmelCase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_UpperCAmelCase )] == word:
lowerCAmelCase = [
[word, *way] for way in table[i]
]
                    # adds the word to every combination the current position holds
                    # and now pushes those combinations to table[i + len(word)]
table[i + len(_UpperCAmelCase )] += new_combinations
    # combinations are built in reverse order, so reverse each one for better output
for combination in table[len(_UpperCAmelCase )]:
combination.reverse()
return table[len(_UpperCAmelCase )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
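    # ----------------------------------------------------------------------
    # Hedged worked example of the table construction above, for target "ab"
    # and word bank ["a", "b", "ab"]: table[0] seeds with [[]]; from index 0,
    # "a" pushes a combination to table[1] and "ab" pushes one to table[2];
    # from index 1, "b" extends table[1]'s combination to table[2]. After the
    # final per-combination reverse, table[2] holds both ways to build "ab".
    print(all_construct('''ab''', ['''a''', '''b''', '''ab''']))  # [['ab'], ['a', 'b']]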
| 309
| 1
|
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : SplitDict ):
lowerCAmelCase = split_dict._to_yaml_list()
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
lowerCAmelCase = SplitDict._from_yaml_list(_UpperCAmelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCAmelCase = None
# the split name of split_dict takes over the name of the split info object
lowerCAmelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=_UpperCAmelCase ), SplitInfo(dataset_name='my_dataset' )] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
lowerCAmelCase = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 309
|
"""simple docstring"""
import re
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
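    """
    Return the complement strand of a DNA sequence (A<->T, C<->G).

    Hedged doctest examples added so the doctest.testmod() call below has
    something to exercise; the obfuscated function name is kept as-is.

    >>> _SCREAMING_SNAKE_CASE("ATCGA")
    'TAGCT'
    >>> _SCREAMING_SNAKE_CASE("XYZ")
    Traceback (most recent call last):
        ...
    ValueError: Invalid Strand
    """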
if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
| 1
|
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
lowerCAmelCase = os.path.abspath(_UpperCAmelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
lowerCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
lowerCAmelCase = convert_pytorch_state_dict_to_flax(_UpperCAmelCase , _UpperCAmelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowerCAmelCase = convert_pytorch_sharded_state_dict_to_flax(_UpperCAmelCase , _UpperCAmelCase )
return flax_state_dict
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple[str] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, jnp.ndarray] , _UpperCAmelCase : str , ):
    def is_key_or_prefix_key_in_dict(key : Tuple[str] ) -> bool:
return len(set(_UpperCAmelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
lowerCAmelCase = pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowerCAmelCase = pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowerCAmelCase = pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowerCAmelCase = pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCAmelCase ):
lowerCAmelCase = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCAmelCase ):
lowerCAmelCase = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
lowerCAmelCase = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowerCAmelCase = pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowerCAmelCase = pt_tuple_key[-2] + '_v'
if name is not None:
lowerCAmelCase = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] ):
# convert pytorch tensor to numpy
lowerCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCAmelCase = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowerCAmelCase = flax_model.params['params']
else:
lowerCAmelCase = flax_model.params
lowerCAmelCase = flatten_dict(_UpperCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCAmelCase = flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(_UpperCAmelCase )
lowerCAmelCase = {}
lowerCAmelCase = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
lowerCAmelCase = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
lowerCAmelCase = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCAmelCase = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCAmelCase ,lowerCAmelCase = rename_key_and_reshape_tensor(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# add model prefix if necessary
lowerCAmelCase = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCAmelCase = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowerCAmelCase = jnp.asarray(_UpperCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
continue
# also add unexpected weight so that warning is thrown
lowerCAmelCase = jnp.asarray(_UpperCAmelCase )
else:
# also add unexpected weight so that warning is thrown
lowerCAmelCase = jnp.asarray(_UpperCAmelCase )
return unflatten_dict(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] ):
import torch
# Load the index
lowerCAmelCase = {}
for shard_file in shard_filenames:
        # load each PyTorch shard checkpoint with torch.load
lowerCAmelCase = torch.load(_UpperCAmelCase )
lowerCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCAmelCase = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCAmelCase = flax_model.params['params']
lowerCAmelCase = flatten_dict(_UpperCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
lowerCAmelCase = flax_model.params
lowerCAmelCase = flatten_dict(_UpperCAmelCase )
lowerCAmelCase = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
lowerCAmelCase = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
lowerCAmelCase = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCAmelCase = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCAmelCase ,lowerCAmelCase = rename_key_and_reshape_tensor(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# add model prefix if necessary
lowerCAmelCase = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCAmelCase = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowerCAmelCase = jnp.asarray(_UpperCAmelCase )
continue
if "var" in flax_key[-1]:
lowerCAmelCase = jnp.asarray(_UpperCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
continue
# also add unexpected weight so that warning is thrown
lowerCAmelCase = jnp.asarray(_UpperCAmelCase )
else:
# also add unexpected weight so that warning is thrown
lowerCAmelCase = jnp.asarray(_UpperCAmelCase )
return unflatten_dict(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] ):
lowerCAmelCase = os.path.abspath(_UpperCAmelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
lowerCAmelCase = getattr(_UpperCAmelCase , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCAmelCase , 'rb' ) as state_f:
try:
lowerCAmelCase = from_bytes(_UpperCAmelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCAmelCase , _UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    lowerCAmelCase = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , _UpperCAmelCase ) ).values()
if any(_UpperCAmelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot
        # handle bf16 and bf16 is not fully supported in PyTorch yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
lowerCAmelCase = jax.tree_util.tree_map(
            lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCAmelCase )
lowerCAmelCase = flatten_dict(_UpperCAmelCase )
lowerCAmelCase = pt_model.state_dict()
lowerCAmelCase = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
lowerCAmelCase = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowerCAmelCase = []
lowerCAmelCase = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCAmelCase = flax_key_tuple[0] == pt_model.base_model_prefix
lowerCAmelCase = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCAmelCase = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowerCAmelCase = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCAmelCase ) not in pt_model_dict:
# conv layer
lowerCAmelCase = flax_key_tuple[:-1] + ('weight',)
lowerCAmelCase = jnp.transpose(_UpperCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCAmelCase ) not in pt_model_dict:
# linear layer
lowerCAmelCase = flax_key_tuple[:-1] + ('weight',)
lowerCAmelCase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCAmelCase = flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowerCAmelCase = flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
lowerCAmelCase = flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
lowerCAmelCase = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowerCAmelCase = '.'.join(_UpperCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowerCAmelCase = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowerCAmelCase = key.split('.' )
lowerCAmelCase = None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowerCAmelCase = key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowerCAmelCase = key_components[-2] + '_v'
if name is not None:
lowerCAmelCase = key_components[:-3] + [name]
lowerCAmelCase = '.'.join(_UpperCAmelCase )
lowerCAmelCase = key
if flax_key in special_pt_names:
lowerCAmelCase = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
lowerCAmelCase = np.asarray(_UpperCAmelCase ) if not isinstance(_UpperCAmelCase , np.ndarray ) else flax_tensor
lowerCAmelCase = torch.from_numpy(_UpperCAmelCase )
# remove from missing keys
missing_keys.remove(_UpperCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCAmelCase )
pt_model.load_state_dict(_UpperCAmelCase )
# re-transform missing_keys to list
lowerCAmelCase = list(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCAmelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'If your task is similar to the task the model of the checkpoint was trained on, '
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
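# --------------------------------------------------------------------------
# Hedged sketch of the two weight layouts the converters above bridge:
# PyTorch stores conv kernels as (out_ch, in_ch, H, W) and linear weights as
# (out_features, in_features); Flax expects (H, W, in_ch, out_ch) conv
# kernels and (in, out) dense kernels -- hence the transpose(2, 3, 1, 0) and
# .T calls in rename_key_and_reshape_tensor.
def _layout_demo():  # hypothetical helper, not part of the converter API
    pt_conv = np.zeros((8, 3, 5, 5))            # (out_ch, in_ch, H, W)
    print(pt_conv.transpose(2, 3, 1, 0).shape)  # (5, 5, 3, 8) for Flax
    pt_linear = np.zeros((10, 4))               # (out_features, in_features)
    print(pt_linear.T.shape)                    # (4, 10) -> Flax dense kernel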
| 309
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__UpperCamelCase : List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__UpperCamelCase : str = [0, 25, 50]
__UpperCamelCase : int = [25, 50, 75]
__UpperCamelCase : str = fuzz.membership.trimf(X, abca)
    __UpperCamelCase : Tuple = fuzz.membership.trimf(X, abcb)  # distinct parameter list [25, 50, 75]
# Compute the different operations using inbuilt functions.
__UpperCamelCase : Dict = np.ones(75)
__UpperCamelCase : str = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__UpperCamelCase : Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__UpperCamelCase : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
__UpperCamelCase : Dict = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__UpperCamelCase : List[str] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
__UpperCamelCase : List[str] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__UpperCamelCase : Tuple = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
__UpperCamelCase : Union[str, Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
__UpperCamelCase : Dict = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
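    # ----------------------------------------------------------------------
    # Hedged sketch: the core standard fuzzy operations reduce to elementwise
    # numpy min/max, independent of skfuzzy:
    mu_a = np.array([0.0, 0.5, 1.0, 0.5])
    mu_b = np.array([0.2, 0.4, 0.6, 0.8])
    print(np.maximum(mu_a, mu_b))  # union:        max(µA, µB)
    print(np.minimum(mu_a, mu_b))  # intersection: min(µA, µB)
    print(1 - mu_a)                # complement:   1 - µA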
| 309
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class a ( a__ ):
snake_case__ = '''fnet'''
def __init__( self , _snake_case=3_20_00 , _snake_case=7_68 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu_new" , _snake_case=0.1 , _snake_case=5_12 , _snake_case=4 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=False , _snake_case=5_12 , _snake_case=3 , _snake_case=1 , _snake_case=2 , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
lowerCAmelCase = vocab_size
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = type_vocab_size
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = use_tpu_fourier_optimizations
lowerCAmelCase = tpu_short_seq_length
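# Minimal usage sketch (assuming the upstream class name FNetConfig from
# transformers; the defaults above mirror the google/fnet-base checkpoint):
# from transformers import FNetConfig
# config = FNetConfig(vocab_size=32000, hidden_size=768)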
| 309
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] , _UpperCAmelCase : str ):
lowerCAmelCase = int(_UpperCAmelCase )
# Initialize Result
lowerCAmelCase = []
# Traverse through all denomination
for denomination in reversed(_UpperCAmelCase ):
# Find denominations
while int(_UpperCAmelCase ) >= int(_UpperCAmelCase ):
total_value -= int(_UpperCAmelCase )
answer.append(_UpperCAmelCase ) # Append the "answers" array
return answer
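# Worked example for the greedy loop above:
# find_minimum_change([1, 2, 5, 10], "18") walks the denominations in reverse
# and returns [10, 5, 2, 1], since 18 = 10 + 5 + 2 + 1.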
# Driver Code
if __name__ == "__main__":
__UpperCamelCase : Any = []
__UpperCamelCase : List[Any] = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
__UpperCamelCase : Any = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
__UpperCamelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
__UpperCamelCase : List[str] = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
__UpperCamelCase : Any = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(f'''Following is minimal change for {value}: ''')
__UpperCamelCase : List[str] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 309
| 1
|
"""simple docstring"""
from collections.abc import Callable
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Callable[[float], float] , _UpperCAmelCase : float , _UpperCAmelCase : float ):
lowerCAmelCase = a
lowerCAmelCase = b
if function(_UpperCAmelCase ) == 0: # one of the a or b is a root for the function
return a
elif function(_UpperCAmelCase ) == 0:
return b
elif (
function(_UpperCAmelCase ) * function(_UpperCAmelCase ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
lowerCAmelCase = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(_UpperCAmelCase ) == 0:
return mid
elif function(_UpperCAmelCase ) * function(_UpperCAmelCase ) < 0:
lowerCAmelCase = mid
else:
lowerCAmelCase = mid
lowerCAmelCase = start + (end - start) / 2.0
return mid
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : float ):
return x**3 - 2 * x - 5
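# With f(x) = x**3 - 2*x - 5, f(2) = -1 and f(3) = 16 bracket a sign change, so
# the bisection above converges to the classic root x ≈ 2.0945515 on [1, 1000].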
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 309
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
def __init__( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = 'gelu'
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 5_12
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case )
lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCAmelCase = model.get_bias()
assert isinstance(_snake_case , _snake_case )
for k, v in name.items():
assert isinstance(_snake_case , tf.Variable )
else:
lowerCAmelCase = model.get_output_embeddings()
assert x is None
lowerCAmelCase = model.get_bias()
assert name is None
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(_snake_case )[0]
lowerCAmelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(_snake_case )[0]
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 309
| 1
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class a :
def __init__( self , _snake_case , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case="resnet50" , _snake_case=3 , _snake_case=32 , _snake_case=3 , _snake_case=True , _snake_case=True , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = out_indices if out_indices is not None else [4]
lowerCAmelCase = stage_names
lowerCAmelCase = out_features
lowerCAmelCase = backbone
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = num_channels
lowerCAmelCase = use_pretrained_backbone
lowerCAmelCase = is_training
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = self.get_config()
return config, pixel_values
def UpperCamelCase__ ( self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TimmBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(
result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class a ( a__ , a__ , a__ , unittest.TestCase ):
snake_case__ = (TimmBackbone,) if is_torch_available() else ()
snake_case__ = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TimmBackboneModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 'resnet18'
lowerCAmelCase = 'microsoft/resnet-18'
lowerCAmelCase = AutoBackbone.from_pretrained(_snake_case , use_timm_backbone=_snake_case )
lowerCAmelCase = AutoBackbone.from_pretrained(_snake_case )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase = AutoBackbone.from_pretrained(_snake_case , use_timm_backbone=_snake_case , out_indices=[1, 2, 3] )
lowerCAmelCase = AutoBackbone.from_pretrained(_snake_case , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Safetensors is not supported by timm.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = True
lowerCAmelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase = self.all_model_classes[0]
lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case )
lowerCAmelCase = model(**_snake_case )
lowerCAmelCase = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCAmelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_snake_case )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(**_snake_case )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase = copy.deepcopy(_snake_case )
lowerCAmelCase = None
lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(**_snake_case )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase = copy.deepcopy(_snake_case )
lowerCAmelCase = False
lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(**_snake_case )
| 309
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Dict = '''▁'''
__UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
__UpperCamelCase : str = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
__UpperCamelCase : Optional[Any] = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
__UpperCamelCase : str = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = ["input_ids"]
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = RESOURCE_FILES_NAMES
def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = sentencepiece_model_ckpt
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCAmelCase = self.load_vocab(filepath=_snake_case )
else:
lowerCAmelCase = {self.sp_model.id_to_piece(_snake_case ): id for id in range(self.sp_model.get_piece_size() )}
lowerCAmelCase = {v: k for k, v in self.vocab.items()}
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if text is None:
return None
lowerCAmelCase = self.tokenize(_snake_case )
lowerCAmelCase ,lowerCAmelCase = '', []
for i, ch in enumerate(_snake_case ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case )
else:
lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case )
if self.is_whitespace(_snake_case ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_snake_case ) )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase = token[1:]
lowerCAmelCase = text[offset:].index(_snake_case ) + offset
lowerCAmelCase = start + len(_snake_case )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase = end
return token_mapping
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) )
def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowerCAmelCase = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case )
else:
lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = []
for pi, piece in enumerate(_snake_case ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_snake_case ) and pi != 0:
new_pieces.append(_snake_case )
continue
else:
continue
lowerCAmelCase = 0
for i, chunk in enumerate(_snake_case ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_snake_case )
lowerCAmelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
if len(_snake_case ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.convert_ids_to_tokens(_snake_case )
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.reverse_vocab.get(_snake_case , self.unk_token )
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_snake_case ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3)
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_snake_case ) == 1:
lowerCAmelCase = unicodedata.category(_snake_case )
if cat == "Zs":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = {}
with io.open(_snake_case , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_snake_case ):
lowerCAmelCase = line.rstrip('\n' )
lowerCAmelCase = int(_snake_case )
return token_to_idx
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = 0
if os.path.isdir(_snake_case ):
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(_snake_case , 'w' , encoding='utf-8' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase = token_index
writer.write(token + '\n' )
index += 1
lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' )
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (vocab_file,)
| 309
| 1
|
"""simple docstring"""
import string
import numpy
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : int ):
return b if a == 0 else greatest_common_divisor(b % a , _UpperCAmelCase )
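# Example trace (assuming the obfuscated second argument of the recursive call
# is `a`, as in the standard Euclid): greatest_common_divisor(4, 36)
# -> greatest_common_divisor(36 % 4, 4) = greatest_common_divisor(0, 4) -> 4.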
class a :
snake_case__ = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
snake_case__ = numpy.vectorize(lambda x : x % 3_6 )
snake_case__ = numpy.vectorize(a__ )
def __init__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.modulus(_snake_case ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
lowerCAmelCase = encrypt_key.shape[0]
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.key_string.index(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.key_string[round(_snake_case )]
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowerCAmelCase = det % len(self.key_string )
lowerCAmelCase = len(self.key_string )
if greatest_common_divisor(_snake_case , len(self.key_string ) ) != 1:
lowerCAmelCase = (
F'determinant modular {req_l} of encryption key({det}) '
F'is not co prime w.r.t {req_l}.\nTry another key.'
)
raise ValueError(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = [char for char in text.upper() if char in self.key_string]
lowerCAmelCase = chars[-1]
while len(_snake_case ) % self.break_key != 0:
chars.append(_snake_case )
return "".join(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.process_text(text.upper() )
lowerCAmelCase = ''
for i in range(0 , len(_snake_case ) - self.break_key + 1 , self.break_key ):
lowerCAmelCase = text[i : i + self.break_key]
lowerCAmelCase = [self.replace_letters(_snake_case ) for char in batch]
lowerCAmelCase = numpy.array([vec] ).T
lowerCAmelCase = self.modulus(self.encrypt_key.dot(_snake_case ) ).T.tolist()[
0
]
lowerCAmelCase = ''.join(
self.replace_digits(_snake_case ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowerCAmelCase = det % len(self.key_string )
lowerCAmelCase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
lowerCAmelCase = i
break
lowerCAmelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(_snake_case ) )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.make_decrypt_key()
lowerCAmelCase = self.process_text(text.upper() )
lowerCAmelCase = ''
for i in range(0 , len(_snake_case ) - self.break_key + 1 , self.break_key ):
lowerCAmelCase = text[i : i + self.break_key]
lowerCAmelCase = [self.replace_letters(_snake_case ) for char in batch]
lowerCAmelCase = numpy.array([vec] ).T
lowerCAmelCase = self.modulus(decrypt_key.dot(_snake_case ) ).T.tolist()[0]
lowerCAmelCase = ''.join(
self.replace_digits(_snake_case ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
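# Round-trip sketch with a valid key: [[2, 5], [1, 6]] has determinant 7,
# which is coprime with 36, so hc.decrypt(hc.encrypt('testmessage')) should
# return the padded, upper-cased input 'TESTMESSAGEE'.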
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = int(input('Enter the order of the encryption key: ' ) )
lowerCAmelCase = []
print('Enter each row of the encryption key with space separated integers' )
for _ in range(_UpperCAmelCase ):
lowerCAmelCase = [int(_UpperCAmelCase ) for x in input().split()]
hill_matrix.append(_UpperCAmelCase )
lowerCAmelCase = HillCipher(numpy.array(_UpperCAmelCase ) )
print('Would you like to encrypt or decrypt some text? (1 or 2)' )
lowerCAmelCase = input('\n1. Encrypt\n2. Decrypt\n' )
if option == "1":
lowerCAmelCase = input('What text would you like to encrypt?: ' )
print('Your encrypted text is:' )
print(hc.encrypt(_UpperCAmelCase ) )
elif option == "2":
lowerCAmelCase = input('What text would you like to decrypt?: ' )
print('Your decrypted text is:' )
print(hc.decrypt(_UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 309
|
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__UpperCamelCase : int = ['''small''', '''medium''', '''large''']
__UpperCamelCase : str = '''lm_head.decoder.weight'''
__UpperCamelCase : Dict = '''lm_head.weight'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = torch.load(_UpperCAmelCase )
lowerCAmelCase = d.pop(_UpperCAmelCase )
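# In the upstream conversion script the popped tensor is re-keyed before
# saving: d[NEW_KEY] = d.pop(OLD_KEY), i.e. lm_head.decoder.weight is
# renamed to lm_head.weight.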
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
__UpperCamelCase : Optional[int] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__UpperCamelCase : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
__UpperCamelCase : str = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 309
| 1
|
"""simple docstring"""
import sys
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ):
lowerCAmelCase = len(_UpperCAmelCase )
lowerCAmelCase = [[0 for x in range(_UpperCAmelCase )] for x in range(_UpperCAmelCase )]
lowerCAmelCase = [[0 for x in range(_UpperCAmelCase )] for x in range(_UpperCAmelCase )]
for chain_length in range(2 , _UpperCAmelCase ):
for a in range(1 , n - chain_length + 1 ):
lowerCAmelCase = a + chain_length - 1
lowerCAmelCase = sys.maxsize
for c in range(_UpperCAmelCase , _UpperCAmelCase ):
lowerCAmelCase = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
lowerCAmelCase = cost
lowerCAmelCase = c
return matrix, sol
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ):
if i == j:
print('A' + str(_UpperCAmelCase ) , end=' ' )
else:
print('(' , end=' ' )
print_optimal_solution(_UpperCAmelCase , _UpperCAmelCase , optimal_solution[i][j] )
print_optimal_solution(_UpperCAmelCase , optimal_solution[i][j] + 1 , _UpperCAmelCase )
print(')' , end=' ' )
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = [30, 35, 15, 5, 10, 20, 25]
lowerCAmelCase = len(_UpperCAmelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
lowerCAmelCase ,lowerCAmelCase = matrix_chain_order(_UpperCAmelCase )
print('No. of Operations required: ' + str(matrix[1][n - 1] ) )
print_optimal_solution(_UpperCAmelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
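# For the CLRS example array above, the expected output is
# "No. of Operations required: 15125" followed by the parenthesization
# ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).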
| 309
|
"""simple docstring"""
__UpperCamelCase : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__UpperCamelCase : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
order.append(_UpperCAmelCase )
return order
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return component
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] ):
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
lowerCAmelCase = {vert: [] for vert in range(len(_UpperCAmelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_UpperCAmelCase )
lowerCAmelCase = []
for i, was_visited in enumerate(_UpperCAmelCase ):
if not was_visited:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = []
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
for i in range(len(_UpperCAmelCase ) ):
lowerCAmelCase = order[len(_UpperCAmelCase ) - i - 1]
if not visited[vert]:
lowerCAmelCase = find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
components_list.append(_UpperCAmelCase )
return components_list
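# For the first sample graph above, {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []},
# the strongly connected components are {0, 1, 2}, {3} and {4}, returned in
# that order by the traversal above.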
| 309
| 1
|
"""simple docstring"""
__UpperCamelCase : int = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : float ):
assert type(_UpperCAmelCase ) in (int, float) and decimal == int(_UpperCAmelCase )
lowerCAmelCase = int(_UpperCAmelCase )
lowerCAmelCase = ''
lowerCAmelCase = False
if decimal < 0:
lowerCAmelCase = True
decimal *= -1
while decimal > 0:
lowerCAmelCase ,lowerCAmelCase = divmod(_UpperCAmelCase , 16 )
lowerCAmelCase = values[remainder] + hexadecimal
lowerCAmelCase = '0x' + hexadecimal
if negative:
lowerCAmelCase = '-' + hexadecimal
return hexadecimal
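# Examples with the digit map above: 26 -> '0x1a' and -256 -> '-0x100'.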
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
|
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class a :
snake_case__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
snake_case__ = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
snake_case__ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case__ = field(
default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.task_name.lower()
class a ( a__ ):
snake_case__ = '''train'''
snake_case__ = '''dev'''
snake_case__ = '''test'''
class a ( a__ ):
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
def __init__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = Split.train , _snake_case = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _snake_case , )
lowerCAmelCase = args
lowerCAmelCase = glue_processors[args.task_name]()
lowerCAmelCase = glue_output_modes[args.task_name]
if isinstance(_snake_case , _snake_case ):
try:
lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
lowerCAmelCase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1]
lowerCAmelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCAmelCase = cached_features_file + '.lock'
with FileLock(_snake_case ):
if os.path.exists(_snake_case ) and not args.overwrite_cache:
lowerCAmelCase = time.time()
lowerCAmelCase = torch.load(_snake_case )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCAmelCase = self.processor.get_test_examples(args.data_dir )
else:
lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCAmelCase = examples[:limit_length]
lowerCAmelCase = glue_convert_examples_to_features(
_snake_case , _snake_case , max_length=args.max_seq_length , label_list=_snake_case , output_mode=self.output_mode , )
lowerCAmelCase = time.time()
torch.save(self.features , _snake_case )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _snake_case ):
"""simple docstring"""
return self.features[i]
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.label_list
| 309
| 1
|
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__UpperCamelCase : Union[str, Any] = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class a ( tr.AbstractTransform ):
def __init__( self , _snake_case = " " ):
"""simple docstring"""
lowerCAmelCase = sentence_delimiter
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return list(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
for sent_idx, sentence in enumerate(_snake_case ):
chars.extend(self.process_string(_snake_case ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(_snake_case ) - 1:
chars.append(self.sentence_delimiter )
return chars
__UpperCamelCase : Optional[int] = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__UpperCamelCase : List[Any] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__UpperCamelCase : Any = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__UpperCamelCase : List[str] = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the WER docs for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
__UpperCamelCase : Optional[int] = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=False ):
"""simple docstring"""
if concatenate_texts:
return jiwer.compute_measures(
_snake_case , _snake_case , truth_transform=_snake_case , hypothesis_transform=_snake_case , )["wer"]
lowerCAmelCase = 0
lowerCAmelCase = 0
for prediction, reference in zip(_snake_case , _snake_case ):
lowerCAmelCase = jiwer.compute_measures(
_snake_case , _snake_case , truth_transform=_snake_case , hypothesis_transform=_snake_case , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 309
|
"""simple docstring"""
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ):
lowerCAmelCase = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_UpperCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase ).lstrip('./' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
return F'{i * " "}*' if i else "\n##"
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_UpperCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F'{md_prefix(_UpperCAmelCase )} {new_part.replace("_" , " " ).title()}' )
return new_path
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
lowerCAmelCase = ''
for filepath in sorted(good_file_paths(_UpperCAmelCase ) ):
lowerCAmelCase ,lowerCAmelCase = os.path.split(_UpperCAmelCase )
if filepath != old_path:
lowerCAmelCase = print_path(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0
lowerCAmelCase = F'{filepath}/{filename}'.replace(' ' , '%20' )
lowerCAmelCase = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F'{md_prefix(_UpperCAmelCase )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
| 309
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : int = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
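# Lazy-import sketch: with this file at transformers/models/nezha/__init__.py
# (as the relative imports suggest), `from transformers.models.nezha import
# NezhaModel` only loads modeling_nezha when the attribute is first resolved.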
| 309
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
__UpperCamelCase : int = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = Github(os.environ['GITHUB_TOKEN'] )
lowerCAmelCase = g.get_repo('huggingface/diffusers' )
lowerCAmelCase = repo.get_issues(state='open' )
for issue in open_issues:
lowerCAmelCase = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=_UpperCAmelCase )
lowerCAmelCase = comments[0] if len(_UpperCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 309
| 1
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
lowerCAmelCase = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
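# Worked example: 13 = 0b1101 clears one set bit per iteration
# (1101 -> 1100 -> 1000 -> 0000), so the function returns 3.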
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Any = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor''']
__UpperCamelCase : Optional[int] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=['stage1', 'stage2', 'stage3', 'stage4'])
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
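# Hedged illustration of the mapping above: "upernet-convnext-tiny" should
# yield backbone depths [3, 3, 9, 3] and hidden sizes [96, 192, 384, 768].
# The call is left commented out because it downloads the ADE20K label file:
# config = get_upernet_config("upernet-convnext-tiny")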
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
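# Hedged sanity check of rename_key on a throwaway dict (keys illustrative only):
_d = {'old.key': 1.0}
rename_key(_d, 'old.key', 'new.key')
assert _d == {'new.key': 1.0}
del _d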
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['state_dict']
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt').pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
lowerCAmelCase = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
lowerCAmelCase = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
lowerCAmelCase = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
lowerCAmelCase = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
lowerCAmelCase = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving processor to {pytorch_dump_folder_path}')
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f'Pushing model and processor for {model_name} to hub')
        model.push_to_hub(f'openmmlab/{model_name}')
        processor.push_to_hub(f'openmmlab/{model_name}')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=['stage2', 'stage3', 'stage4'],
    )
    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )
    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
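# Hedged sketch of the fused-qkv split used above: a (3*h, h) projection is
# cut into three (h, h) blocks for query, key and value, in that order.
_w = torch.arange(12).reshape(6, 2).float()  # h = 2
assert torch.equal(torch.cat([_w[:2], _w[2:4], _w[4:]]), _w)
del _w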
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename='adet_swin_ft.pth')
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365', filename='deta_swin_pt_o365.pth')
    else:
        raise ValueError(f'Model name {model_name} not supported')
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    # print name and shape of each parameter of the original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes; the target prefixes below assume the usual "model."
    # nesting of DetaForObjectDetection
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace('transformer.decoder', 'model.decoder')] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict['model.' + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace('transformer', 'model')] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format='coco_detection')
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))
    # verify logits
    print('Logits:', outputs.logits[0, :3, :3])
    print('Boxes:', outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print('Everything ok!')
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print('Pushing model and processor to hub...')
        model.push_to_hub(f'jozhang97/{model_name}')
        processor.push_to_hub(f'jozhang97/{model_name}')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
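# Hedged sanity check of the single-point crossover above: children keep the
# parents' length and only use characters drawn from the two parents.
_c1, _c2 = crossover("abcdef", "uvwxyz")
assert len(_c1) == len(_c2) == 6 and all(ch in "abcdefuvwxyz" for ch in _c1 + _c2)
del _c1, _c2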
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population: {total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}')
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f'The tokenizer already contains the token {placeholder_token}. Please pass a different'
                ' `placeholder_token` that is not already in the tokenizer.')

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f'_{i}'
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f'The tokenizer already has placeholder token {token} that can get confused with'
                    f' {placeholder_token}; keep placeholder tokens independent')
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, ' '.join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args,
            **kwargs,
        )
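# Hedged usage sketch: after `tokenizer.add_placeholder_tokens("<cat-toy>",
# num_vec_per_token=3)`, any prompt containing "<cat-toy>" is expanded to
# "<cat-toy>_0 <cat-toy>_1 <cat-toy>_2" before normal CLIP tokenization.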
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg', self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow('Output-Image', self.img)
        cv2.imshow('Input-Image', self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
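# Hedged note on stretch(): self.sk accumulates the normalized histogram (a
# CDF), so each gray level i is remapped to roughly round((L - 1) * CDF(i)),
# the classic histogram-equalization transfer function.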
if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
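# Hedged sketch on a triangle graph (assumed input format: adjacency_list[u]
# is a list of [neighbor, weight] pairs): an MST over 3 vertices has 2 edges.
_example = defaultdict(list)
for _u, _v, _w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
    _example[_u].append([_v, _w])
    _example[_v].append([_u, _w])
assert len(prisms_algorithm(_example)) == 2
del _example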
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    'split_info', [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='my_dataset')])
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'train': split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ['en-ru', 26.0],
            ['ru-en', 22.0],
            ['en-de', 22.0],
            ['de-en', 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']

        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['bleu'], min_bleu_score)
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_flip_channel_order'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Any = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor''']
__UpperCamelCase : Optional[int] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 309
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCAmelCase = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
lowerCAmelCase = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowerCAmelCase = {'unk_token': '<unk>'}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_snake_case ) )
lowerCAmelCase = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase = os.path.join(self.tmpdirname , _snake_case )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_snake_case , _snake_case )
def UpperCamelCase__ ( self , **_snake_case ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def UpperCamelCase__ ( self , **_snake_case ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case )
def UpperCamelCase__ ( self , **_snake_case ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
lowerCAmelCase = [Image.fromarray(np.moveaxis(_snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
    def test_save_load_pretrained_default(self):
        """simple docstring"""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        """simple docstring"""
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
    def test_tokenizer(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_visual_prompt(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'conditional_pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
elif (
len(_UpperCAmelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
        cofactor_matrix = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
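# Example (added): a quick sanity check of the reconstruction above.
#     inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]])  ->  [[3.0, -5.0], [-1.0, 2.0]]
# The determinant here is 1, and multiplying the result back against the
# input gives the identity matrix.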
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main():
    n = int(input('enter the numbers of values: '))
    y = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print('enter the values of parameters in a list: ')
    x = list(map(int, input().split()))
    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('enter the value to interpolate: '))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(F'the value at {value} is {summ}')
if __name__ == "__main__":
main()
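# Worked example (added), assuming the reconstruction above: for x = 0 1 2 3
# and y = 1 3 7 13 (f(x) = x^2 + x + 1), the forward differences are
# 2 4 6 / 2 2 / 0. Interpolating at value = 1.5 gives u = 1.5 and
# summ = 1 + 1.5*2 + (1.5*0.5/2!)*2 = 4.75, which matches f(1.5) exactly.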
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mctct'''] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
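# Note (added): the _LazyModule pattern above defers the heavy torch imports
# until an attribute such as MCTCTModel is first accessed, while the
# TYPE_CHECKING branch keeps static type checkers aware of the real symbols.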
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = '''bridgetower_vision_model'''
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1E-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """simple docstring"""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get('model_type') == "bridgetower":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = '''bridgetower_text_model'''
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """simple docstring"""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get('model_type') == "bridgetower":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = '''bridgetower'''
    def __init__(self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768, initializer_factor=1, layer_norm_eps=1E-05, share_link_tower_layers=False, link_tower_type="add", num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs, ):
        """simple docstring"""
        # accept and discard legacy *_config_dict kwargs
        _ = kwargs.pop('text_config_dict', None)
        _ = kwargs.pop('vision_config_dict', None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.')
        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)
    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """simple docstring"""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
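# Example (added): a minimal usage sketch, assuming the class names
# reconstructed above mirror the Hugging Face BridgeTower configs.
#     text_cfg = BridgeTowerTextConfig()
#     vision_cfg = BridgeTowerVisionConfig()
#     config = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     config.to_dict()['model_type']  ->  'bridgetower'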
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        """simple docstring"""
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix = None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
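# Example (added): the token layout produced by the two helpers above,
# assuming the reconstructed parameter names.
#     single sequence:  [CLS] A [SEP]           token_type_ids: 0 0 ... 0
#     sequence pair:    [CLS] A [SEP] B [SEP]   token_type_ids: 0 ... 0 1 ... 1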
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ])
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub('visual_encoder*', 'vision_model.encoder', key)
    if "blocks" in key:
        key = re.sub(R'blocks', 'layers', key)
    if "attn" in key:
        key = re.sub(R'attn', 'self_attn', key)
    if "norm1" in key:
        key = re.sub(R'norm1', 'layer_norm1', key)
    if "norm2" in key:
        key = re.sub(R'norm2', 'layer_norm2', key)
    if "encoder.norm" in key:
        key = re.sub(R'encoder.norm', 'post_layernorm', key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(R'encoder.patch_embed.proj', 'embeddings.patch_embedding', key)
    if "encoder.pos_embed" in key:
        key = re.sub(R'encoder.pos_embed', 'embeddings.position_embedding', key)
    if "encoder.cls_token" in key:
        key = re.sub(R'encoder.cls_token', 'embeddings.class_embedding', key)
    if "self_attn" in key:
        key = re.sub(R'self_attn.proj', 'self_attn.projection', key)
    return key
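# Example (added): tracing rename_key through a typical checkpoint key,
# assuming the reconstruction above.
#     'visual_encoder.blocks.0.attn.proj.weight'
#         -> 'vision_model.encoder.layers.0.self_attn.projection.weight'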
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()
    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit='base')
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)
    image_size = 384
    image = load_demo_image(image_size=image_size, device='cpu')
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    input_ids = tokenizer(['a picture of']).input_ids
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit='base')
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)
    question = ['How many dogs are in this image?']
    question_input_ids = tokenizer(question, return_tensors='pt').input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa')
    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit='base')
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)
    question = ['A picture of a woman with a dog sitting in a beach']
    question_input_ids = tokenizer(
        question, return_tensors='pt', padding='max_length', truncation=True, max_length=35, ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table: list = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
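# Note (added): table[i] holds every decomposition of target[:i], so the
# output (and therefore the runtime) can grow exponentially with the number
# of ways the target can be segmented; fine for the short demos below.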
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        """simple docstring"""
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)
    def get_feature_extractor(self, **kwargs):
        """simple docstring"""
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        """simple docstring"""
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_feature_extractor(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors='np')
        input_processor = processor(audios=raw_speech, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
    def test_tokenizer(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_tokenizer_decode(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg='`processor` and `feature_extractor` model input names do not match', )
"""simple docstring"""
import re
def dna(dna: str) -> str:
    if len(re.findall('[ATCG]', dna)) != len(dna):
        raise ValueError('Invalid Strand')
    return dna.translate(dna.maketrans('ATCG', 'TAGC'))
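# Example (added): expected behaviour of the complement, assuming the
# reconstruction above.
#     dna('GCTA')  ->  'CGAT'
#     dna('ATGC')  ->  'TACG'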
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed(self):
        """simple docstring"""
        return 12
    @property
    def num_embeds_ada_norm(self):
        """simple docstring"""
        return 12
    @property
    def text_embedder_hidden_size(self):
        """simple docstring"""
        return 32
    @property
    def dummy_vqvae(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
        return model
    @property
    def dummy_tokenizer(self):
        """simple docstring"""
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        """simple docstring"""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
    @property
    def dummy_transformer(self):
        """simple docstring"""
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        """simple docstring"""
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
    def test_vq_diffusion_classifier_free_sampling(self):
        """simple docstring"""
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        """simple docstring"""
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
        pipeline = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            'teddy bear playing in the pool', num_images_per_prompt=1, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abca = [0, 25, 50]
    abcb = [25, 50, 75]
    young = fuzz.membership.trimf(X, abca)
    middle_aged = fuzz.membership.trimf(X, abcb)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
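# Note (added): the index trick above maps play < oppo to 0 ('Loss'),
# play == oppo to 1 ('Tie') and play > oppo to 2 ('Win'), because the two
# boolean comparisons sum to 0, 1 or 2.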
def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize('hand, expected', TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize('hand, expected', TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values', TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected', TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected', TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize('hand, other, expected', TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize('hand, other, expected', generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand('2D AC 3H 4H 5S'), PokerHand('2S 3H 4H 5S 6C')]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand('2C 4S AS 3D 5C')
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file_path = os.path.join(script_dir, 'poker_hands.txt')
    with open(poker_hands_file_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '''0'''
    if (
        input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
        == "y"
    ):
        n = int(input('''Enter the number of denominations you want to add: ''').strip())
        for i in range(0, n):
            denominations.append(int(input(f'''Denomination {i}: ''').strip()))
        value = input('''Enter the change you want to make in Indian Currency: ''').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('''Enter the change you want to make: ''').strip()
    if int(value) == 0 or int(value) < 0:
        print('''The total value cannot be zero or negative.''')
    else:
        print(f'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=''' ''')
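# Note (added): this greedy strategy is optimal for canonical coin systems
# such as the Indian denominations above (e.g. 987 -> 500 100 100 100 100 50
# 20 10 5 2), but can be suboptimal for arbitrary denominations, e.g. with
# [1, 3, 4] and value 6 the greedy picks 4+1+1 while 3+3 is optimal.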
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFEsmModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """simple docstring"""
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
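

# The suite below combines TFModelTesterMixin (generic TF model checks) and
# PipelineTesterMixin (pipeline smoke tests); both draw on the model classes
# and task mapping declared as class attributes.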
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
        {
            'feature-extraction': TFEsmModel,
            'fill-mask': TFEsmForMaskedLM,
            'text-classification': TFEsmForSequenceClassification,
            'token-classification': TFEsmForTokenClassification,
            'zero-shot': TFEsmForSequenceClassification,
        }
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('Protein models do not support embedding resizing.')
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip('Protein models do not support embedding resizing.')
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
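

# The integration tests below download the real 'facebook/esm2_t6_8M_UR50D'
# checkpoint, so they are marked @slow and only run when RUN_SLOW=1 is set.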
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
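

# A minimal way to run just this module, assuming the standard transformers
# repository layout (adjust the path if your checkout differs):
#
#   RUN_SLOW=1 python -m pytest tests/models/esm/test_modeling_tf_esm.py -v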
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
def __init__( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = 'gelu'
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 5_12
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
(
(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,
) = self.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case )
lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCAmelCase = model.get_bias()
assert isinstance(_snake_case , _snake_case )
for k, v in name.items():
assert isinstance(_snake_case , tf.Variable )
else:
lowerCAmelCase = model.get_output_embeddings()
assert x is None
lowerCAmelCase = model.get_bias()
assert name is None
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(_snake_case )[0]
lowerCAmelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(_snake_case )[0]
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )